Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 09:38:49 +01:00)

Compare commits: release-2. ... v2.7.2 (352 commits)
@@ -11,3 +11,17 @@ cmd/**/debug
debug.test
coverage.out
ui/node_modules/
test-results/
test/
manifests/
hack/
docs/
examples/
.github/
!test/fixture
!test/container
!hack/installers
!hack/gpg-wrapper.sh
!hack/git-verify-wrapper.sh
!hack/tool-versions.sh
!hack/install.sh
.github/ISSUE_TEMPLATE/release.md (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
---
name: Argo CD Release
about: Used by our Release Champion to track progress of a minor release
title: 'Argo CD Release vX.X'
labels: 'release'
assignees: ''
---

Target RC1 date: ___. __, ____
Target GA date: ___. __, ____

- [ ] Create new section in the [Release Planning doc](https://docs.google.com/document/d/1trJIomcgXcfvLw0aYnERrFWfPjQOfYMDJOCh1S8nMBc/edit?usp=sharing)
- [ ] Schedule a Release Planning meeting roughly two weeks before the scheduled Release freeze date by adding it to the community calendar (or delegate this task to someone with write access to the community calendar)
- [ ] Include Zoom link in the invite
- [ ] Post in #argo-cd and #argo-contributors one week before the meeting
- [ ] Post again one hour before the meeting
- [ ] At the meeting, remove issues/PRs from the project's column for that release which have not been “claimed” by at least one Approver (add it to the next column if Approver requests that)
- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Create new release branch
- [ ] Add the release branch to ReadTheDocs
- [ ] Confirm that tweet and blog post are ready
- [ ] Trigger the release
- [ ] After the release is finished, publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate (or delegate this task to an Approver and coordinate timing)
- [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729)
- [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.
.github/cherry-pick-bot.yml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
enabled: true
preservePullRequestTitle: true
.github/dependabot.yml (vendored, 25 lines changed)
@@ -16,3 +16,28 @@ updates:
    directory: "/ui/"
    schedule:
      interval: "daily"

  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "daily"

  - package-ecosystem: "docker"
    directory: "/test/container/"
    schedule:
      interval: "daily"

  - package-ecosystem: "docker"
    directory: "/test/e2e/multiarch-container/"
    schedule:
      interval: "daily"

  - package-ecosystem: "docker"
    directory: "/test/remote/"
    schedule:
      interval: "daily"

  - package-ecosystem: "docker"
    directory: "/ui-test/"
    schedule:
      interval: "daily"
.github/pull_request_template.md (vendored, 5 lines changed)
@@ -11,7 +11,10 @@ Checklist:
* [ ] Does this PR require documentation updates?
* [ ] I've updated documentation as required by this PR.
* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/blob/master/community/CONTRIBUTING.md#legal)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).
* [ ] My new feature complies with the [feature status](https://github.com/argoproj/argoproj/blob/master/community/feature-status.md) guidelines.
* [ ] I have added a brief description of why this PR is necessary and/or what this PR solves.

Please see [Contribution FAQs](https://argo-cd.readthedocs.io/en/latest/developer-guide/faq/) if you have questions about your pull-request.
.github/workflows/README.md (vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
# Workflows

| Workflow            | Description                                                       |
|---------------------|-------------------------------------------------------------------|
| ci-build.yaml       | Build, lint, test, codegen, build-ui, analyze, e2e-test           |
| codeql.yaml         | CodeQL analysis                                                   |
| image-reuse.yaml    | Build, push, and sign container images                            |
| image.yaml          | Build container image for PRs & publish for push events           |
| pr-title-check.yaml | Lint PR for semantic information                                  |
| init-release.yaml   | Build manifests and version, then create a PR for release branch  |
| release.yaml        | Build images, CLI binaries, provenances, and post actions         |
| update-snyk.yaml    | Scheduled Snyk reports                                            |

# Reusable workflows

## image-reuse.yaml

- The reusable workflow can be used to build or publish images to multiple container registries (Quay, GHCR, Docker Hub), and to sign them with cosign when an image is published.
- A Go version *must* be specified, e.g. 1.19.
- The image name for each registry *must* contain the tag. Note: multiple tags are allowed for each registry as a CSV value.
- Multiple platforms can be specified, e.g. linux/amd64,linux/arm64.
- Images are not published by default. The `push` boolean must be set to `true` to push images.
- An optional target build stage can be specified.

| Inputs            | Description                          | Type        | Required | Defaults    |
|-------------------|--------------------------------------|-------------|----------|-------------|
| go-version        | Version of Go to be used             | string      | true     | none        |
| quay_image_name   | Full image name and tag              | CSV, string | false    | none        |
| ghcr_image_name   | Full image name and tag              | CSV, string | false    | none        |
| docker_image_name | Full image name and tag              | CSV, string | false    | none        |
| platforms         | Platforms to build (linux/amd64)     | CSV, string | false    | linux/amd64 |
| push              | Whether to push image(s) to registry | boolean     | false    | false       |
| target            | Target build stage                   | string      | false    | none        |

| Outputs      | Description                           | Type   |
|--------------|---------------------------------------|--------|
| image-digest | Digest of the container image created | string |
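For orientation, a caller invokes the reusable workflow as a job with `uses:` and the inputs documented above. This is a minimal sketch, not part of the README file itself; the tag and platform values are illustrative, and the shape mirrors the build-only job that image.yaml (later in this diff) uses:

```yaml
jobs:
  build-only:
    permissions:
      contents: read
      packages: write    # only needed if a ghcr.io image is pushed
      id-token: write    # needed for keyless cosign signing when push is true
    uses: ./.github/workflows/image-reuse.yaml
    with:
      go-version: 1.19
      quay_image_name: quay.io/argoproj/argocd:latest   # illustrative tag
      platforms: linux/amd64,linux/arm64
      push: false
```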
.github/workflows/ci-build.yaml (vendored, 91 lines changed)

The hunks in this file are toolchain and dependency bumps plus small job tweaks:

- The pull_request trigger now also covers 'release-*' branches, and GOLANG_VERSION moves from '1.18' to '1.19'.
- Action pins are refreshed throughout: actions/checkout v3.2.0 to v3.4.0, actions/setup-go v3.4.0 to v4.0.0, actions/cache v3.0.11 to v3.3.1, actions/upload-artifact v3.1.1 to v3.1.2, actions/download-artifact v3.0.1 to v3.0.2, actions/setup-node v3.5.1 to v3.6.0.
- golangci-lint is bumped from v1.46.2 to v1.51.0, keeping the `--timeout 10m --exclude SA5011 --verbose` args.
- The test-go, test-go-race and codegen jobs add the dist directory to PATH, since kustomize is installed there by the local tool installers.
- The UI build's Node version moves from 12.18.4 to 18.15.0.
- The e2e matrix now covers k3s v1.26.0, v1.25.4, v1.24.3 and v1.23.3 (v1.22.6 is dropped), and the temporary step that pinned kubectl to v1.25.4 (a workaround for the unpinned kubectl 1.26.0 on ubuntu-22.04 breaking the TestNamespacedResourceDiffing e2e test) goes away with it.
- Images pulled for the e2e tests are bumped: ghcr.io/dexidp/dex v2.35.3 to v2.36.0 and redis 7.0.5-alpine to 7.0.11-alpine.
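All of these bumps follow the repository's convention of pinning third-party actions to a full commit SHA and keeping the human-readable version in a trailing comment. The snippet below is lifted directly from the updated checkout and setup-go steps in this file, shown only to make the convention concrete:

```yaml
steps:
  - name: Checkout code
    uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
  - name: Setup Golang
    uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
    with:
      go-version: ${{ env.GOLANG_VERSION }}
```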
.github/workflows/codeql.yml (vendored, 3 lines changed)
@@ -5,6 +5,7 @@ on:
    # Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot
    branches-ignore:
      - 'dependabot/**'
      - 'cherry-pick-*'
  pull_request:
  schedule:
    - cron: '0 19 * * 0'
@@ -29,7 +30,7 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
        uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
.github/workflows/image-reuse.yaml (vendored, new file, 153 lines)
@@ -0,0 +1,153 @@
name: Publish and Sign Container Image
on:
  workflow_call:
    inputs:
      go-version:
        required: true
        type: string
      quay_image_name:
        required: false
        type: string
      ghcr_image_name:
        required: false
        type: string
      docker_image_name:
        required: false
        type: string
      platforms:
        required: true
        type: string
        default: linux/amd64
      push:
        required: true
        type: boolean
        default: false
      target:
        required: false
        type: string

    secrets:
      quay_username:
        required: false
      quay_password:
        required: false
      ghcr_username:
        required: false
      ghcr_password:
        required: false
      docker_username:
        required: false
      docker_password:
        required: false

    outputs:
      image-digest:
        description: "sha256 digest of container image"
        value: ${{ jobs.publish.outputs.image-digest }}

permissions: {}

jobs:
  publish:
    permissions:
      contents: read
      packages: write # Used to push images to `ghcr.io` if used.
      id-token: write # Needed to create an OIDC token for keyless signing
    runs-on: ubuntu-22.04
    outputs:
      image-digest: ${{ steps.image.outputs.digest }}
    steps:
      - name: Checkout code
        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.3.0
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
        if: ${{ github.ref_type == 'tag'}}

      - name: Checkout code
        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
        if: ${{ github.ref_type != 'tag'}}

      - name: Setup Golang
        uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
        with:
          go-version: ${{ inputs.go-version }}

      - name: Install cosign
        uses: sigstore/cosign-installer@c3667d99424e7e6047999fb6246c0da843953c65 # v3.0.1
        with:
          cosign-release: 'v2.0.0'

      - uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
      - uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0

      - name: Setup tags for container image as a CSV type
        run: |
          IMAGE_TAGS=$(for str in \
            ${{ inputs.quay_image_name }} \
            ${{ inputs.ghcr_image_name }} \
            ${{ inputs.docker_image_name}}; do
            echo -n "${str}",;done | sed 's/,$//')

          echo $IMAGE_TAGS
          echo "TAGS=$IMAGE_TAGS" >> $GITHUB_ENV

      - name: Setup image namespace for signing, strip off the tag
        run: |
          TAGS=$(for tag in \
            ${{ inputs.quay_image_name }} \
            ${{ inputs.ghcr_image_name }} \
            ${{ inputs.docker_image_name}}; do
            echo -n "${tag}" | awk -F ":" '{print $1}' -;done)

          echo $TAGS
          echo 'SIGNING_TAGS<<EOF' >> $GITHUB_ENV
          echo $TAGS >> $GITHUB_ENV
          echo 'EOF' >> $GITHUB_ENV

      - name: Login to Quay.io
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
        with:
          registry: quay.io
          username: ${{ secrets.quay_username }}
          password: ${{ secrets.quay_password }}
        if: ${{ inputs.quay_image_name && inputs.push }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
        with:
          registry: ghcr.io
          username: ${{ secrets.ghcr_username }}
          password: ${{ secrets.ghcr_password }}
        if: ${{ inputs.ghcr_image_name && inputs.push }}

      - name: Login to dockerhub Container Registry
        uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0
        with:
          username: ${{ secrets.docker_username }}
          password: ${{ secrets.docker_password }}
        if: ${{ inputs.docker_image_name && inputs.push }}

      - name: Build and push container image
        id: image
        uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 #v4.0.0
        with:
          context: .
          platforms: ${{ inputs.platforms }}
          push: ${{ inputs.push }}
          tags: ${{ env.TAGS }}
          target: ${{ inputs.target }}
          provenance: false
          sbom: false

      - name: Sign container images
        run: |
          for signing_tag in $SIGNING_TAGS; do
            cosign sign \
            -a "repo=${{ github.repository }}" \
            -a "workflow=${{ github.workflow }}" \
            -a "sha=${{ github.sha }}" \
            -y \
            "$signing_tag"@${{ steps.image.outputs.digest }}
          done
        if: ${{ inputs.push }}
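The `image-digest` output is what downstream jobs pin to for provenance. As a sketch of the consumption side (the job names and secrets mirror the provenance job in the release.yaml diff summarized further down; nothing here is new beyond the wiring):

```yaml
  argocd-image-provenance:
    needs: [argocd-image]   # argocd-image is a job that calls image-reuse.yaml
    permissions:
      actions: read
      id-token: write
      packages: write
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.5.0
    with:
      image: quay.io/argoproj/argocd
      digest: ${{ needs.argocd-image.outputs.image-digest }}
    secrets:
      registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
      registry-password: ${{ secrets.RELEASE_QUAY_TOKEN }}
```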
.github/workflows/image.yaml (vendored, 136 lines changed)

This is a restructuring of the PR/push image workflow rather than a line-level tweak; the single hunk (@@ -9,90 +9,108 @@) shows:

- Top-level permissions shrink from `contents: read` to `{}`, with each job declaring only what it needs.
- The old single `publish` job, which checked out into a GOPATH layout, ran `docker login` against ghcr.io and quay.io, built and pushed with `docker buildx build` directly, and signed quay.io/argoproj/argocd:latest with a stored key (`cosign sign --key env://COSIGN_PRIVATE_KEY`, cosign v1.13.0), is split up:
  - `set-vars` computes the ghcr image tag as `$(cat ./VERSION)-${GITHUB_SHA::8}` and picks the build platforms: linux/amd64 only, or linux/amd64,linux/arm64,linux/s390x,linux/ppc64le on push events or when the PR carries the `test-multi-image` label (previously `test-arm-image`).
  - `build-only` calls the reusable image-reuse.yaml workflow with go-version 1.19 and `push: false` for pull requests.
  - `build-and-publish` calls image-reuse.yaml with `push: true` on push events, publishing quay.io/argoproj/argocd:latest and ghcr.io/argoproj/argo-cd/argocd:<image-tag> with keyless signing handled inside the reusable workflow.
  - `build-and-publish-provenance` feeds the published digest to slsa-framework/slsa-github-generator generator_container_slsa3.yml@v1.5.0.
- The `Deploy` job still clones argoproj-deployments and runs `kustomize edit set image`, but now reads the tag from `needs.set-vars.outputs.image-tag` instead of a step output inside the same job; the TODO about cleaning up old GHCR images remains.
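Since the YAML itself is condensed above, the tag plumbing is worth keeping concrete: a step output becomes a job output, which downstream jobs read through `needs`. A minimal sketch using the names from the hunk:

```yaml
jobs:
  set-vars:
    runs-on: ubuntu-22.04
    outputs:
      image-tag: ${{ steps.image.outputs.tag }}
    steps:
      - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
      - name: Set image tag for ghcr
        id: image
        run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT

  # Downstream jobs read the value through `needs`, e.g. in the call to image-reuse.yaml:
  #   ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
```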
.github/workflows/init-release.yaml (vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
name: Init ArgoCD Release
on:
  workflow_dispatch:
    inputs:
      TARGET_BRANCH:
        description: 'TARGET_BRANCH to checkout (e.g. release-2.5)'
        required: true
        type: string

      TARGET_VERSION:
        description: 'TARGET_VERSION to build manifests (e.g. 2.5.0-rc1) Note: the `v` prefix is not used'
        required: true
        type: string

permissions: {}

jobs:
  prepare-release:
    permissions:
      contents: write # for peter-evans/create-pull-request to create branch
      pull-requests: write # for peter-evans/create-pull-request to create a PR
    name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }}
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ inputs.TARGET_BRANCH }}

      - name: Check if TARGET_VERSION is well formed.
        run: |
          set -xue
          # Target version must not contain 'v' prefix
          if echo "${{ inputs.TARGET_VERSION }}" | grep -e '^v'; then
            echo "::error::Target version '${{ inputs.TARGET_VERSION }}' should not begin with a 'v' prefix, refusing to continue." >&2
            exit 1
          fi

      - name: Create VERSION information
        run: |
          set -ue
          echo "Bumping version from $(cat VERSION) to ${{ inputs.TARGET_VERSION }}"
          echo "${{ inputs.TARGET_VERSION }}" > VERSION

      # We install kustomize in the dist directory
      - name: Add dist to PATH
        run: |
          echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH

      - name: Generate new set of manifests
        run: |
          set -ue
          make install-codegen-tools-local
          make manifests-local VERSION=${{ inputs.TARGET_VERSION }}
          git diff

      - name: Create pull request
        uses: peter-evans/create-pull-request@38e0b6e68b4c852a5500a94740f0e535e0d7ba54 # v4.2.4
        with:
          commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
          title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
          body: Updating VERSION and manifests to ${{ inputs.TARGET_VERSION }}
          branch: update-version
          branch-suffix: random
          signoff: true
          labels: release
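Because this workflow only has a `workflow_dispatch` trigger, it is started from the Actions tab or from the GitHub CLI, e.g. `gh workflow run init-release.yaml -f TARGET_BRANCH=release-2.7 -f TARGET_VERSION=2.7.0-rc1` (branch and version here are purely illustrative).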
.github/workflows/pr-title-check.yml (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
name: "Lint PR"

on:
  pull_request_target:
    types:
      - opened
      - edited
      - synchronize

# IMPORTANT: No checkout actions, scripts, or builds should be added to this workflow. Permissions should always be used
# with extreme caution.
permissions:
  contents: read

# PR updates can happen in quick succession leading to this
# workflow being triggered a number of times. This limits it
# to one run per PR.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}

jobs:
  main:
    permissions:
      pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs
      statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR
    name: Validate PR title
    runs-on: ubuntu-latest
    steps:
      # IMPORTANT: Carefully review changes when updating this action. Using the pull_request_target event requires caution.
      - uses: amannn/action-semantic-pull-request@b6bca70dcd3e56e896605356ce09b76f7e1e0d39 # v5.1.0
        with:
          types: |
            feat
            fix
            docs
            test
            ci
            chore
            [Bot] docs
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
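In effect, PR titles must follow the Conventional Commits shape `type: subject` using one of the types listed above, e.g. `fix: retry failed sync on transient errors` or `docs: clarify RBAC policy syntax` (both titles are illustrative); anything else fails the "Validate PR title" status check.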
480
.github/workflows/release.yaml
vendored
480
.github/workflows/release.yaml
vendored
@@ -1,46 +1,141 @@
|
||||
name: Create ArgoCD release
|
||||
name: Publish ArgoCD Release
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "release-v*"
|
||||
- "!release-v1.5*"
|
||||
- "!release-v1.4*"
|
||||
- "!release-v1.3*"
|
||||
- "!release-v1.2*"
|
||||
- "!release-v1.1*"
|
||||
- "!release-v1.0*"
|
||||
- "!release-v0*"
|
||||
- 'v*'
|
||||
- '!v2.4*'
|
||||
- '!v2.5*'
|
||||
- '!v2.6*'
|
||||
|
||||
permissions: {}
|
||||
|
||||
env:
|
||||
GOLANG_VERSION: '1.18'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
GOLANG_VERSION: '1.19' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
prepare-release:
|
||||
argocd-image:
|
||||
permissions:
|
||||
contents: write # To push changes to release branch
|
||||
name: Perform automatic release on trigger ${{ github.ref }}
|
||||
contents: read
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # used to push images to `ghcr.io` if used.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: ./.github/workflows/image-reuse.yaml
|
||||
with:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
go-version: 1.19
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
|
||||
argocd-image-provenance:
|
||||
needs: [argocd-image]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment.
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.5.0
|
||||
with:
|
||||
image: quay.io/argoproj/argocd
|
||||
digest: ${{ needs.argocd-image.outputs.image-digest }}
|
||||
secrets:
|
||||
registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
registry-password: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
|
||||
goreleaser:
|
||||
needs:
|
||||
- argocd-image
|
||||
- argocd-image-provenance
|
||||
permissions:
|
||||
contents: write # used for uploading assets
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
outputs:
|
||||
hashes: ${{ steps.hash.outputs.hashes }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Fetch all tags
|
||||
run: git fetch --force --tags
|
||||
|
||||
- name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in realease branches.
|
||||
run: |
|
||||
set -xue
|
||||
if echo ${{ github.ref_name }} | grep -E -- '-rc1+$';then
|
||||
echo "GORELEASER_PREVIOUS_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n 2 | head -n 1)" >> $GITHUB_ENV
|
||||
else
|
||||
echo "This is not the first release on the branch, Using GoReleaser defaults"
|
||||
fi
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
|
||||
- name: Set environment variables for ldflags
|
||||
id: set_ldflag
|
||||
run: |
|
||||
echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV
|
||||
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@f82d6c1c344bcacabba2c841718984797f664a6b # v4.2.0
|
||||
id: run-goreleaser
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean --timeout 55m
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
|
||||
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }}
|
||||
|
||||
- name: Generate subject for provenance
|
||||
id: hash
|
||||
env:
|
||||
ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
hashes=$(echo $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
|
||||
if test "$hashes" = ""; then # goreleaser < v1.13.0
|
||||
checksum_file=$(echo "$ARTIFACTS" | jq -r '.[] | select (.type=="Checksum") | .path')
|
||||
hashes=$(cat $checksum_file | base64 -w0)
|
||||
fi
|
||||
echo "hashes=$hashes" >> $GITHUB_OUTPUT
|
||||
|
||||
goreleaser-provenance:
|
||||
needs: [goreleaser]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.5.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
|
||||
provenance-name: "argocd-cli.intoto.jsonl"
|
||||
upload-assets: true
|
||||
|
||||
generate-sbom:
|
||||
name: Create Sbom and sign assets
|
||||
needs:
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
permissions:
|
||||
contents: write # Needed for release uploads
|
||||
id-token: write # Needed for signing Sbom
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
# The name of the tag as supplied by the GitHub event
|
||||
SOURCE_TAG: ${{ github.ref }}
|
||||
# The image namespace where Docker image will be published to
|
||||
IMAGE_NAMESPACE: quay.io/argoproj
|
||||
# Whether to create & push image and release assets
|
||||
DRY_RUN: false
|
||||
# Whether a draft release should be created, instead of public one
|
||||
DRAFT_RELEASE: false
|
||||
# Whether to update homebrew with this release as well
|
||||
# Set RELEASE_HOMEBREW_TOKEN secret in repository for this to work - needs
|
||||
# access to public repositories
|
||||
UPDATE_HOMEBREW: false
|
||||
# Name of the GitHub user for Git config
|
||||
GIT_USERNAME: argo-bot
|
||||
# E-Mail of the GitHub user for Git config
|
||||
GIT_EMAIL: argoproj@gmail.com
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
|
||||
@@ -48,212 +143,15 @@ jobs:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Check if the published tag is well formed and setup vars
|
||||
run: |
|
||||
set -xue
|
||||
# Target version must match major.minor.patch and optional -rcX suffix
|
||||
# where X must be a number.
|
||||
TARGET_VERSION=${SOURCE_TAG#*release-v}
|
||||
if ! echo "${TARGET_VERSION}" | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then
|
||||
echo "::error::Target version '${TARGET_VERSION}' is malformed, refusing to continue." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Target branch is the release branch we're going to operate on
|
||||
# Its name is 'release-<major>.<minor>'
|
||||
TARGET_BRANCH="release-${TARGET_VERSION%\.[0-9]*}"
|
||||
|
||||
# The release tag is the source tag, minus the release- prefix
|
||||
RELEASE_TAG="${SOURCE_TAG#*release-}"
|
||||
|
||||
# Whether this is a pre-release (indicated by -rc suffix)
|
||||
PRE_RELEASE=false
|
||||
if echo "${RELEASE_TAG}" | egrep -- '-rc[0-9]+$'; then
|
||||
PRE_RELEASE=true
|
||||
fi
|
||||
|
||||
# We must not have a release trigger within the same release branch,
|
||||
# because that means a release for this branch is already running.
|
||||
if git tag -l | grep "release-v${TARGET_VERSION%\.[0-9]*}" | grep -v "release-v${TARGET_VERSION}"; then
|
||||
echo "::error::Another release for branch ${TARGET_BRANCH} is currently in progress."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure that release do not yet exist
|
||||
if git rev-parse ${RELEASE_TAG}; then
|
||||
echo "::error::Release tag ${RELEASE_TAG} already exists in repository. Refusing to continue."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make the variables available in follow-up steps
|
||||
echo "TARGET_VERSION=${TARGET_VERSION}" >> $GITHUB_ENV
|
||||
echo "TARGET_BRANCH=${TARGET_BRANCH}" >> $GITHUB_ENV
|
||||
echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV
|
||||
echo "PRE_RELEASE=${PRE_RELEASE}" >> $GITHUB_ENV
|
||||
|
||||
- name: Check if our release tag has a correct annotation
|
||||
run: |
|
||||
set -ue
|
||||
# Fetch all tag information as well
|
||||
git fetch --prune --tags --force
|
||||
|
||||
echo "=========== BEGIN COMMIT MESSAGE ============="
|
||||
git show ${SOURCE_TAG}
|
||||
echo "============ END COMMIT MESSAGE =============="
|
||||
|
||||
# Quite dirty hack to get the release notes from the annotated tag
|
||||
# into a temporary file.
|
||||
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)
|
||||
|
||||
prefix=true
|
||||
begin=false
|
||||
git show ${SOURCE_TAG} | while read line; do
|
||||
# Whatever is in commit history for the tag, we only want that
|
||||
# annotation from our tag. We discard everything else.
|
||||
if test "$begin" = "false"; then
|
||||
if echo "$line" | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi
|
||||
continue
|
||||
fi
|
||||
if test "$prefix" = "true"; then
|
||||
if test -z "$line"; then prefix=false; fi
|
||||
else
|
||||
if echo "$line" | egrep -q '^commit [0-9a-f]+'; then
|
||||
break
|
||||
fi
|
||||
echo "$line" >> ${RELEASE_NOTES}
|
||||
fi
|
||||
done
|
||||
|
||||
# For debug purposes
|
||||
echo "============BEGIN RELEASE NOTES================="
|
||||
cat ${RELEASE_NOTES}
|
||||
echo "=============END RELEASE NOTES=================="
|
||||
|
||||
# Too short release notes are suspicious. We need at least 100 bytes.
|
||||
relNoteLen=$(stat -c '%s' $RELEASE_NOTES)
|
||||
if test $relNoteLen -lt 100; then
|
||||
echo "::error::No release notes provided in tag annotation (or tag is not annotated)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for magic string '## Quick Start' in head of release notes
|
||||
if ! head -2 ${RELEASE_NOTES} | grep -iq '## Quick Start'; then
|
||||
echo "::error::Release notes seem invalid, quick start section not found."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# We store path to temporary release notes file for later reading, we
|
||||
# need it when creating release.
|
||||
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
|
||||
|
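The loop above scrapes the tag annotation out of `git show` output: skip everything before the "tag <name>" header, skip the header block up to the first blank line, then collect lines until the tagged commit itself begins. A rough Go equivalent of that state machine, assuming the same `git show` text as input (the function name and the scanner-based approach are mine, only the parsing rules come from the step):

```go
package notes

import (
	"bufio"
	"regexp"
	"strings"
)

var commitLine = regexp.MustCompile(`^commit [0-9a-f]+`)

// ExtractTagAnnotation returns the annotation body of an annotated tag, given the
// raw output of `git show <tag>` and the tag name.
func ExtractTagAnnotation(gitShowOutput, tagName string) string {
	var body []string
	begin, inHeader := false, true
	sc := bufio.NewScanner(strings.NewReader(gitShowOutput))
	for sc.Scan() {
		line := sc.Text()
		if !begin {
			// Discard everything until the "tag <name>" header appears.
			if strings.Contains(line, "tag "+tagName) {
				begin = true
			}
			continue
		}
		if inHeader {
			if line == "" { // a blank line ends the Tagger/Date header block
				inHeader = false
			}
			continue
		}
		if commitLine.MatchString(line) { // the tagged commit follows the annotation
			break
		}
		body = append(body, line)
	}
	return strings.Join(body, "\n")
}
```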
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 # v3.4.0
|
||||
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
|
||||
- name: Setup Git author information
|
||||
run: |
|
||||
set -ue
|
||||
git config --global user.email "${GIT_EMAIL}"
|
||||
git config --global user.name "${GIT_USERNAME}"
|
||||
|
||||
- name: Checkout corresponding release branch
|
||||
run: |
|
||||
set -ue
|
||||
echo "Switching to release branch '${TARGET_BRANCH}'"
|
||||
if ! git checkout ${TARGET_BRANCH}; then
|
||||
echo "::error::Checking out release branch '${TARGET_BRANCH}' for target version '${TARGET_VERSION}' (tagged '${RELEASE_TAG}') failed. Does it exist in repo?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Create VERSION information
|
||||
run: |
|
||||
set -ue
|
||||
echo "Bumping version from $(cat VERSION) to ${TARGET_VERSION}"
|
||||
echo "${TARGET_VERSION}" > VERSION
|
||||
git commit -m "Bump version to ${TARGET_VERSION}" VERSION
|
||||
|
||||
- name: Generate new set of manifests
|
||||
run: |
|
||||
set -ue
|
||||
make install-codegen-tools-local
|
||||
make manifests-local VERSION=${TARGET_VERSION}
|
||||
git diff
|
||||
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
|
||||
|
||||
- name: Create the release tag
|
||||
run: |
|
||||
set -ue
|
||||
echo "Creating release ${RELEASE_TAG}"
|
||||
git tag ${RELEASE_TAG}
|
||||
|
||||
- name: Login to docker repositories
|
||||
env:
|
||||
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
|
||||
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
|
||||
QUAY_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
run: |
|
||||
set -ue
|
||||
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
|
||||
# Remove the following when Docker Hub is gone
|
||||
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
|
||||
- uses: docker/setup-buildx-action@8c0edbc76e98fa90f69d9a2c020dcb50019dc325 # v2.2.1
|
||||
- name: Build and push Docker image for release
|
||||
run: |
|
||||
set -ue
|
||||
git clean -fd
|
||||
mkdir -p dist/
|
||||
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
|
||||
make release-cli
|
||||
make checksums
|
||||
chmod +x ./dist/argocd-linux-amd64
|
||||
./dist/argocd-linux-amd64 version --client
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
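`make checksums` (its recipe is not shown in this diff) produces the argocd-<version>-checksums.txt file that the signing step below consumes. As a hypothetical sketch of the general shape of such a file, one "<sha256>  <filename>" line per CLI binary in dist/ might be generated like this (not the repository's actual Makefile recipe):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func main() {
	// The output name mirrors the asset the signing step expects; 2.7.2 is an example version.
	out, err := os.Create("dist/argocd-2.7.2-checksums.txt")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	files, _ := filepath.Glob("dist/argocd-*")
	for _, f := range files {
		fh, err := os.Open(f)
		if err != nil {
			panic(err)
		}
		h := sha256.New()
		if _, err := io.Copy(h, fh); err != nil {
			panic(err)
		}
		fh.Close()
		// "sha256  filename" is the conventional checksums format.
		fmt.Fprintf(out, "%x  %s\n", h.Sum(nil), filepath.Base(f))
	}
}
```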
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
|
||||
uses: sigstore/cosign-installer@c3667d99424e7e6047999fb6246c0da843953c65 # v3.0.1
|
||||
with:
|
||||
cosign-release: 'v1.13.0'
|
||||
|
||||
- name: Sign Argo CD container images and assets
|
||||
run: |
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
|
||||
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
|
||||
# Retrieves the public key to release as an asset
|
||||
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
|
||||
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
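For consumers of the release, the assets signed above can be checked against the published argocd-cosign.pub. A hedged sketch of doing that from Go by shelling out to cosign's verify-blob command (the file names follow the workflow; the wrapper itself and the exact invocation are illustrative, assuming the --key/--signature flags of the installed cosign release):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Verify the checksums file signature with the release public key.
	cmd := exec.Command("cosign", "verify-blob",
		"--key", "argocd-cosign.pub",
		"--signature", "argocd-2.7.2-checksums.sig",
		"argocd-2.7.2-checksums.txt")
	out, err := cmd.CombinedOutput()
	fmt.Println(string(out))
	if err != nil {
		fmt.Println("verification failed:", err)
	}
}
```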
||||
- name: Read release notes file
|
||||
id: release-notes
|
||||
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
|
||||
with:
|
||||
path: ${{ env.RELEASE_NOTES }}
|
||||
|
||||
- name: Push changes to release branch
|
||||
run: |
|
||||
set -ue
|
||||
git push origin ${TARGET_BRANCH}
|
||||
git push origin ${RELEASE_TAG}
|
||||
|
||||
- name: Dry run GitHub release
|
||||
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
id: create_release
|
||||
with:
|
||||
tag_name: ${{ env.RELEASE_TAG }}
|
||||
release_name: ${{ env.RELEASE_TAG }}
|
||||
draft: ${{ env.DRAFT_RELEASE }}
|
||||
prerelease: ${{ env.PRE_RELEASE }}
|
||||
body: ${{ steps.release-notes.outputs.content }}
|
||||
if: ${{ env.DRY_RUN == 'true' }}
|
||||
cosign-release: 'v2.0.0'
|
||||
|
||||
- name: Generate SBOM (spdx)
|
||||
id: spdx-builder
|
||||
@@ -264,9 +162,9 @@ jobs:
|
||||
SIGS_BOM_VERSION: v0.2.1
|
||||
# comma delimited list of project relative folders to inspect for package
|
||||
# managers (gomod, yarn, npm).
|
||||
PROJECT_FOLDERS: ".,./ui"
|
||||
PROJECT_FOLDERS: ".,./ui"
|
||||
# full qualified name of the docker image to be inspected
|
||||
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
|
||||
DOCKER_IMAGE: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
run: |
|
||||
yarn install --cwd ./ui
|
||||
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
|
||||
@@ -284,43 +182,101 @@ jobs:
|
||||
fi
|
||||
|
||||
cd /tmp && tar -zcf sbom.tar.gz *.spdx
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Sign sbom
|
||||
- name: Sign SBOM
|
||||
run: |
|
||||
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
|
||||
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
cosign sign-blob \
|
||||
--output-certificate=/tmp/sbom.tar.gz.pem \
|
||||
--output-signature=/tmp/sbom.tar.gz.sig \
|
||||
-y \
|
||||
/tmp/sbom.tar.gz
|
||||
|
||||
- name: Create GitHub release
|
||||
- name: Upload SBOM and signature assets
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
name: ${{ env.RELEASE_TAG }}
|
||||
tag_name: ${{ env.RELEASE_TAG }}
|
||||
draft: ${{ env.DRAFT_RELEASE }}
|
||||
prerelease: ${{ env.PRE_RELEASE }}
|
||||
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
|
||||
files: |
|
||||
dist/argocd-*
|
||||
/tmp/sbom.tar.gz
|
||||
/tmp/sbom.tar.gz.sig
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
/tmp/sbom.tar.*
|
||||
|
||||
- name: Update homebrew formula
|
||||
env:
|
||||
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
|
||||
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
|
||||
post-release:
|
||||
needs:
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
- generate-sbom
|
||||
permissions:
|
||||
contents: write # Needed to push commit to update stable tag
|
||||
pull-requests: write # Needed to create PR for VERSION update.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
|
||||
with:
|
||||
token: ${{env.HOMEBREW_TOKEN}}
|
||||
formula: argocd
|
||||
if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }}
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Delete original request tag from repository
|
||||
- name: Setup Git author information
|
||||
run: |
|
||||
set -ue
|
||||
git push --delete origin ${SOURCE_TAG}
|
||||
if: ${{ always() }}
|
||||
git config --global user.email 'ci@argoproj.com'
|
||||
git config --global user.name 'CI'
|
||||
|
||||
- name: Check if tag is the latest version and not a pre-release
|
||||
run: |
|
||||
set -xue
|
||||
# Fetch all tag information
|
||||
git fetch --prune --tags --force
|
||||
|
||||
LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1)
|
||||
|
||||
PRE_RELEASE=false
|
||||
# Check if latest tag is a pre-release
|
||||
if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then
|
||||
PRE_RELEASE=true
|
||||
fi
|
||||
|
||||
# Ensure latest tag matches github.ref_name & not a pre-release
|
||||
if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then
|
||||
echo "TAG_STABLE=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "TAG_STABLE=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
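The check above leans on `git -c versionsort.suffix=-rc tag --sort=version:refname` so that -rcN tags sort below their final release. The same decision, sketched in Go with golang.org/x/mod/semver instead of git's version sort (the tag values are examples only):

```go
package main

import (
	"fmt"
	"sort"
	"strings"

	"golang.org/x/mod/semver"
)

// latestTag returns the highest semver tag; -rcN pre-releases rank below the final release.
func latestTag(tags []string) string {
	sort.Slice(tags, func(i, j int) bool { return semver.Compare(tags[i], tags[j]) < 0 })
	return tags[len(tags)-1]
}

func main() {
	tags := []string{"v2.6.7", "v2.7.0-rc1", "v2.7.2", "v2.7.1"}
	latest := latestTag(tags)
	refName := "v2.7.2" // the tag that triggered the workflow
	// The stable tag only moves when the pushed tag is the newest one and not a pre-release.
	stable := latest == refName && !strings.Contains(latest, "-rc")
	fmt.Println(latest, stable) // v2.7.2 true
}
```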
||||
- name: Update stable tag to latest version
|
||||
run: |
|
||||
git tag -f stable ${{ github.ref_name }}
|
||||
git push -f origin stable
|
||||
if: ${{ env.TAG_STABLE == 'true' }}
|
||||
|
||||
- name: Check to see if VERSION should be updated on master branch
|
||||
run: |
|
||||
set -xue
|
||||
SOURCE_TAG=${{ github.ref_name }}
|
||||
VERSION_REF="${SOURCE_TAG#*v}"
|
||||
if echo "$VERSION_REF" | grep -E -- '^[0-9]+\.[0-9]+\.0$';then
|
||||
VERSION=$(awk 'BEGIN {FS=OFS="."} {$2++; print}' <<< "${VERSION_REF}")
|
||||
echo "Updating VERSION to: $VERSION"
|
||||
echo "UPDATE_VERSION=true" >> $GITHUB_ENV
|
||||
echo "NEW_VERSION=$VERSION" >> $GITHUB_ENV
|
||||
else
|
||||
echo "Not updating VERSION"
|
||||
echo "UPDATE_VERSION=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
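The awk one-liner above only fires for fresh X.Y.0 tags and advances master's VERSION to the next minor. Spelled out as a small Go sketch (the function name and the example inputs are illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// nextMasterVersion returns the VERSION to put on master after releasing X.Y.0,
// i.e. X.(Y+1).0, and reports false for patch or pre-release versions.
func nextMasterVersion(released string) (string, bool) {
	parts := strings.Split(released, ".")
	if len(parts) != 3 || parts[2] != "0" { // only X.Y.0 releases bump master
		return "", false
	}
	minor, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", false
	}
	return fmt.Sprintf("%s.%d.0", parts[0], minor+1), true
}

func main() {
	fmt.Println(nextMasterVersion("2.7.0")) // 2.8.0 true
	fmt.Println(nextMasterVersion("2.7.2")) // "" false (patch releases don't bump master)
}
```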
||||
- name: Update VERSION on master branch
|
||||
run: |
|
||||
echo ${{ env.NEW_VERSION }} > VERSION
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
- name: Create PR to update VERSION on master branch
|
||||
uses: peter-evans/create-pull-request@38e0b6e68b4c852a5500a94740f0e535e0d7ba54 # v4.2.4
|
||||
with:
|
||||
commit-message: Bump version in master
|
||||
title: "chore: Bump version in master"
|
||||
body: All images built from master should indicate which version we are on track for.
|
||||
signoff: true
|
||||
branch: update-version
|
||||
branch-suffix: random
|
||||
base: master
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
67  .github/workflows/scorecard.yaml  (vendored, new file)
@@ -0,0 +1,67 @@
name: Scorecards supply-chain security
on:
  # Only the default branch is supported.
  branch_protection_rule:
  schedule:
    - cron: "39 9 * * 2"
  push:
    branches: ["master"]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# Declare default permissions as read only.
permissions: read-all

jobs:
  analysis:
    name: Scorecards analysis
    runs-on: ubuntu-22.04
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Used to receive a badge. (Upcoming feature)
      id-token: write
      # Needs for private repositories.
      contents: read
      actions: read
    if: github.repository == 'argoproj/argo-cd'

    steps:
      - name: "Checkout code"
        uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
        with:
          results_file: results.sarif
          results_format: sarif
          # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if:
          # - you want to enable the Branch-Protection check on a *public* repository, or
          # - you are installing Scorecards on a *private* repository
          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
          # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}

          # Publish the results for public repositories to enable scorecard badges. For more details, see
          # https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories, `publish_results` will automatically be set to `false`, regardless
          # of the value entered here.
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@3ebbd71c74ef574dbc558c82f70e52732c8b44fe # v2.2.1
        with:
          sarif_file: results.sarif
4  .github/workflows/update-snyk.yaml  (vendored)
@@ -17,7 +17,7 @@ jobs:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
        uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Build reports
@@ -31,6 +31,6 @@ jobs:
          git config --global user.email 'ci@argoproj.com'
          git config --global user.name 'CI'
          git add docs/snyk
          git commit -m "[Bot] Update Snyk reports" --signoff
          git commit -m "[Bot] docs: Update Snyk reports" --signoff
          git push --set-upstream origin "$pr_branch"
          gh pr create -B master -H "$pr_branch" --title '[Bot] docs: Update Snyk report' --body ''

2  .gitpod.Dockerfile  (vendored)
@@ -1,4 +1,4 @@
FROM gitpod/workspace-full
FROM gitpod/workspace-full@sha256:d5787229cd062aceae91109f1690013d3f25062916492fb7f444d13de3186178

USER root

120  .goreleaser.yaml  (new file)
@@ -0,0 +1,120 @@
|
||||
project_name: argocd
|
||||
|
||||
before:
|
||||
hooks:
|
||||
- go mod download
|
||||
- make build-ui
|
||||
|
||||
builds:
|
||||
- id: argocd-cli
|
||||
main: ./cmd
|
||||
binary: argocd-{{ .Os}}-{{ .Arch}}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -v
|
||||
ldflags:
|
||||
- -X github.com/argoproj/argo-cd/v2/common.version={{ .Version }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.buildDate={{ .Date }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.gitCommit={{ .FullCommit }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.gitTreeState={{ .Env.GIT_TREE_STATE }}
|
||||
- -X github.com/argoproj/argo-cd/v2/common.kubectlVersion={{ .Env.KUBECTL_VERSION }}
|
||||
- -extldflags="-static"
|
||||
goos:
|
||||
- linux
|
||||
- darwin
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
- s390x
|
||||
- ppc64le
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: s390x
|
||||
- goos: darwin
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: s390x
|
||||
- goos: windows
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
|
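The -X ldflags above stamp version metadata into package-level variables at link time; common.version and its siblings are the variables named in the config. A toy, self-contained illustration of the mechanism (the stand-in package and default values are not Argo CD's actual code):

```go
package main

import "fmt"

// In Argo CD these variables live in github.com/argoproj/argo-cd/v2/common; this is a stand-in.
var (
	version      = "unknown"
	buildDate    = "1970-01-01T00:00:00Z"
	gitCommit    = ""
	gitTreeState = ""
)

func main() {
	// Building with: go build -ldflags "-X main.version=v2.7.2 -X main.gitCommit=cbee7e6011 ..."
	// overrides the defaults above without touching the source.
	fmt.Printf("argocd %s (built %s, commit %s, tree %s)\n", version, buildDate, gitCommit, gitTreeState)
}
```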
||||
archives:
|
||||
- id: argocd-archive
|
||||
builds:
|
||||
- argocd-cli
|
||||
name_template: |-
|
||||
{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}
|
||||
format: binary
|
||||
|
||||
checksum:
|
||||
name_template: 'cli_checksums.txt'
|
||||
algorithm: sha256
|
||||
|
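The name_template fields are Go text templates, the templating GoReleaser builds on; rendered for linux/amd64 they yield the argocd-linux-amd64 binary name that the release workflow smoke-tests. A tiny sketch of that expansion (the struct is a stand-in for GoReleaser's real template context):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("name").Parse("{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}\n"))
	// Prints: argocd-linux-amd64
	_ = t.Execute(os.Stdout, struct{ ProjectName, Os, Arch string }{"argocd", "linux", "amd64"})
}
```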
||||
release:
|
||||
prerelease: auto
|
||||
draft: false
|
||||
header: |
|
||||
## Quick Start
|
||||
|
||||
### Non-HA:
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/install.yaml
|
||||
```
|
||||
|
||||
### HA:
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/ha/install.yaml
|
||||
```
|
||||
|
||||
## Release Signatures and Provenance
|
||||
|
||||
All Argo CD container images are signed by cosign. A Provenance is generated for container images and CLI binaries which meet the SLSA Level 3 specifications. See the [documentation](https://argo-cd.readthedocs.io/en/stable/operator-manual/signed-release-assets) on how to verify.
|
||||
|
||||
|
||||
## Upgrading
|
||||
|
||||
If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation.
|
||||
footer: |
|
||||
**Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }}
|
||||
|
||||
<a href="https://argoproj.github.io/cd/"><img src="https://raw.githubusercontent.com/argoproj/argo-site/master/content/pages/cd/gitops-cd.png" width="25%" ></a>
|
||||
|
||||
|
||||
snapshot: #### To be removed for PR
|
||||
name_template: "2.6.0"
|
||||
|
||||
changelog:
|
||||
use:
|
||||
github
|
||||
sort: asc
|
||||
abbrev: 0
|
||||
groups: # Regex use RE2 syntax as defined here: https://github.com/google/re2/wiki/Syntax.
|
||||
- title: 'Features'
|
||||
regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$'
|
||||
order: 100
|
||||
- title: 'Bug fixes'
|
||||
regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$'
|
||||
order: 200
|
||||
- title: 'Documentation'
|
||||
regexp: '^.*?docs(\([[:word:]]+\))??!?:.+$'
|
||||
order: 300
|
||||
- title: 'Dependency updates'
|
||||
regexp: '^.*?(feat|fix|chore)\(deps?.+\)!?:.+$'
|
||||
order: 400
|
||||
- title: 'Other work'
|
||||
order: 999
|
||||
filters:
|
||||
exclude:
|
||||
- '^test:'
|
||||
- '^.*?Bump(\([[:word:]]+\))?.+$'
|
||||
|
||||
|
||||
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
|
||||
|
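The changelog groups above bucket commit subjects by conventional-commit prefix using RE2 patterns; Go's regexp package is RE2 as well, so the expressions can be exercised directly. A small sketch that classifies a few example subjects with the same patterns (the classify helper and its first-match ordering are mine, not GoReleaser's exact semantics):

```go
package main

import (
	"fmt"
	"regexp"
)

var groups = []struct{ title, pattern string }{
	{"Features", `^.*?feat(\([[:word:]]+\))??!?:.+$`},
	{"Bug fixes", `^.*?fix(\([[:word:]]+\))??!?:.+$`},
	{"Documentation", `^.*?docs(\([[:word:]]+\))??!?:.+$`},
	{"Dependency updates", `^.*?(feat|fix|chore)\(deps?.+\)!?:.+$`},
}

// classify returns the first group whose pattern matches the commit subject.
func classify(subject string) string {
	for _, g := range groups {
		if regexp.MustCompile(g.pattern).MatchString(subject) {
			return g.title
		}
	}
	return "Other work"
}

func main() {
	fmt.Println(classify("feat(ui): add dark mode"))        // Features
	fmt.Println(classify("chore(deps): bump cosign to v2")) // Dependency updates
	fmt.Println(classify("docs: clarify upgrade notes"))     // Documentation
	fmt.Println(classify("Bump version to 2.7.2"))           // Other work (the exclude filter above would drop it entirely)
}
```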
@@ -4,4 +4,8 @@ mkdocs:
  fail_on_warning: false
python:
  install:
    - requirements: docs/requirements.txt
    - requirements: docs/requirements.txt
build:
  os: "ubuntu-22.04"
  tools:
    python: "3.7"

11  Dockerfile
@@ -1,10 +1,10 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:9a0bdde4188b896a372804be2384015e90e3f84906b750c1a53539b585fbbe7f
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.18 AS builder
FROM docker.io/library/golang:1.19.6@sha256:7ce31d15a3a4dbf20446cccffa4020d3a2974ad2287d96123f55caf22c7adb71 AS builder

RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list

@@ -36,6 +36,8 @@ RUN ./install.sh helm-linux && \
####################################################################################################
FROM $BASE_IMAGE AS argocd-base

LABEL org.opencontainers.image.source="https://github.com/argoproj/argo-cd"

USER root

ENV ARGOCD_USER_ID=999
@@ -81,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:18.15.0@sha256:8d9a875ee427897ef245302e31e2319385b092f1c3368b497e89790f240368f5 AS argocd-ui

WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
@@ -99,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.18 AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.19.6@sha256:7ce31d15a3a4dbf20446cccffa4020d3a2974ad2287d96123f55caf22c7adb71 AS argocd-build

WORKDIR /go/src/github.com/argoproj/argo-cd

@@ -130,3 +132,4 @@ RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth

USER $ARGOCD_USER_ID
ENTRYPOINT ["/usr/bin/tini", "--"]

97  Makefile
@@ -64,13 +64,20 @@ else
|
||||
DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)"
|
||||
endif
|
||||
|
||||
# User and group IDs to map to the test container
|
||||
CONTAINER_UID=$(shell id -u)
|
||||
CONTAINER_GID=$(shell id -g)
|
||||
|
||||
# Set SUDO to sudo to run privileged commands with sudo
|
||||
SUDO?=
|
||||
|
||||
# Runs any command in the argocd-test-utils container in server mode
|
||||
# Server mode container will start with uid 0 and drop privileges during runtime
|
||||
define run-in-test-server
|
||||
docker run --rm -it \
|
||||
$(SUDO) docker run --rm -it \
|
||||
--name argocd-test-server \
|
||||
-u $(shell id -u):$(shell id -g) \
|
||||
-e USER_ID=$(shell id -u) \
|
||||
-u $(CONTAINER_UID):$(CONTAINER_GID) \
|
||||
-e USER_ID=$(CONTAINER_UID) \
|
||||
-e HOME=/home/user \
|
||||
-e GOPATH=/go \
|
||||
-e GOCACHE=/tmp/go-build-cache \
|
||||
@@ -98,9 +105,9 @@ endef
|
||||
|
||||
# Runs any command in the argocd-test-utils container in client mode
|
||||
define run-in-test-client
|
||||
docker run --rm -it \
|
||||
$(SUDO) docker run --rm -it \
|
||||
--name argocd-test-client \
|
||||
-u $(shell id -u):$(shell id -g) \
|
||||
-u $(CONTAINER_UID):$(CONTAINER_GID) \
|
||||
-e HOME=/home/user \
|
||||
-e GOPATH=/go \
|
||||
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
|
||||
@@ -119,7 +126,7 @@ endef
|
||||
|
||||
#
|
||||
define exec-in-test-server
|
||||
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
$(SUDO) docker exec -it -u $(CONTAINER_UID):$(CONTAINER_GID) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
endef
|
||||
|
||||
PATH:=$(PATH):$(PWD)/hack
|
||||
@@ -212,7 +219,7 @@ clidocsgen: ensure-gopath
|
||||
|
||||
|
||||
.PHONY: codegen-local
|
||||
codegen-local: ensure-gopath mod-vendor-local notification-docs notification-catalog gogen protogen clientgen openapigen clidocsgen manifests-local
|
||||
codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local notification-docs notification-catalog
|
||||
rm -rf vendor/
|
||||
|
||||
.PHONY: codegen
|
||||
@@ -225,11 +232,11 @@ cli: test-tools-image
|
||||
|
||||
.PHONY: cli-local
|
||||
cli-local: clean-debug
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
|
||||
.PHONY: gen-resources-cli-local
|
||||
gen-resources-cli-local: clean-debug
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
|
||||
.PHONY: release-cli
|
||||
release-cli: clean-debug build-ui
|
||||
@@ -244,8 +251,8 @@ release-cli: clean-debug build-ui
|
||||
.PHONY: test-tools-image
|
||||
test-tools-image:
|
||||
ifndef SKIP_TEST_TOOLS_IMAGE
|
||||
docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
$(SUDO) docker build --build-arg UID=$(CONTAINER_UID) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
$(SUDO) docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
endif
|
||||
|
||||
.PHONY: manifests-local
|
||||
@@ -259,19 +266,19 @@ manifests: test-tools-image
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
.PHONY: argocd-all
|
||||
argocd-all: clean-debug
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
|
||||
.PHONY: server
|
||||
server: clean-debug
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
|
||||
|
||||
.PHONY: repo-server
|
||||
repo-server:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
|
||||
|
||||
.PHONY: controller
|
||||
controller:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
CGO_ENABLED=0 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
|
||||
.PHONY: build-ui
|
||||
build-ui:
|
||||
@@ -287,7 +294,7 @@ ifeq ($(DEV_IMAGE), true)
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 docker build --platform=linux/amd64 -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
|
||||
@@ -326,7 +333,7 @@ mod-vendor: test-tools-image
|
||||
mod-vendor-local: mod-download-local
|
||||
go mod vendor
|
||||
|
||||
# Deprecated - replace by install-local-tools
|
||||
# Deprecated - replace by install-tools-local
|
||||
.PHONY: install-lint-tools
|
||||
install-lint-tools:
|
||||
./hack/install.sh lint-tools
|
||||
@@ -361,7 +368,7 @@ build: test-tools-image
|
||||
# Build all Go code (local version)
|
||||
.PHONY: build-local
|
||||
build-local:
|
||||
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
|
||||
GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
|
||||
|
||||
# Run all unit tests
|
||||
#
|
||||
@@ -572,7 +579,7 @@ list:
|
||||
|
||||
.PHONY: applicationset-controller
|
||||
applicationset-controller:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
GODEBUG="tarinsecurepath=0,zipinsecurepath=0" CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
|
||||
.PHONY: checksums
|
||||
checksums:
|
||||
@@ -589,3 +596,55 @@ snyk-non-container-tests:
|
||||
.PHONY: snyk-report
|
||||
snyk-report:
|
||||
./hack/snyk-report.sh $(target_branch)
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo 'Note: Generally an item w/ (-local) will run inside docker unless you use the -local variant'
|
||||
@echo
|
||||
@echo 'Common targets'
|
||||
@echo
|
||||
@echo 'all -- make cli and image'
|
||||
@echo
|
||||
@echo 'components:'
|
||||
@echo ' applicationset-controller -- applicationset controller'
|
||||
@echo ' cli(-local) -- argocd cli program'
|
||||
@echo ' controller -- controller (orchestrator)'
|
||||
@echo ' repo-server -- repo server (manage repository instances)'
|
||||
@echo ' server -- argocd web application'
|
||||
@echo
|
||||
@echo 'build:'
|
||||
@echo ' image -- make image of the following items'
|
||||
@echo ' build(-local) -- compile go'
|
||||
@echo ' build-docs(-local) -- build docs'
|
||||
@echo ' build-ui -- compile typescript'
|
||||
@echo
|
||||
@echo 'run:'
|
||||
@echo ' run -- run the components locally'
|
||||
@echo ' serve-docs(-local) -- expose the documents for viewing in a browser'
|
||||
@echo
|
||||
@echo 'release:'
|
||||
@echo ' release-cli'
|
||||
@echo ' release-precheck'
|
||||
@echo ' checksums'
|
||||
@echo
|
||||
@echo 'docs:'
|
||||
@echo ' build-docs(-local)'
|
||||
@echo ' serve-docs(-local)'
|
||||
@echo ' notification-docs'
|
||||
@echo ' clidocsgen'
|
||||
@echo
|
||||
@echo 'testing:'
|
||||
@echo ' test(-local)'
|
||||
@echo ' start-e2e(-local)'
|
||||
@echo ' test-e2e(-local)'
|
||||
@echo ' test-race(-local)'
|
||||
@echo
|
||||
@echo 'debug:'
|
||||
@echo ' list -- list all make targets'
|
||||
@echo ' install-tools-local -- install all the tools below'
|
||||
@echo ' install-lint-tools(-local)'
|
||||
@echo
|
||||
@echo 'codegen:'
|
||||
@echo ' codegen(-local) -- if using -local, run the following targets first'
|
||||
@echo ' install-codegen-tools-local -- run this to install the codegen tools'
|
||||
@echo ' install-go-tools-local -- run this to install go libraries for codegen'
|
||||
14  Procfile
@@ -1,12 +1,12 @@
|
||||
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
git-server: test/fixture/testrepos/start-git.sh
|
||||
helm-registry: test/fixture/testrepos/start-helm-registry.sh
|
||||
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
|
||||
|
||||
16  README.md
@@ -1,6 +1,18 @@
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [](https://argoproj.github.io/community/join-slack) [](https://codecov.io/gh/argoproj/argo-cd) [](https://github.com/argoproj/argo-cd/releases/latest) [](https://bestpractices.coreinfrastructure.org/projects/4486) [](https://twitter.com/argoproj)
**Releases:**
[](https://github.com/argoproj/argo-cd/releases/latest)
[](https://artifacthub.io/packages/helm/argo/argo-cd)

**Code:**
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
[](https://codecov.io/gh/argoproj/argo-cd)
[](https://bestpractices.coreinfrastructure.org/projects/4486)
[](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-cd)
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd?ref=badge_shield)

**Social:**
[](https://twitter.com/argoproj)
[](https://argoproj.github.io/community/join-slack)

# Argo CD - Declarative Continuous Delivery for Kubernetes

## What is Argo CD?
@@ -69,7 +81,7 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/)
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
1. [How to create Argo CD Applications Automatically using ApplicationSet? "Automation of GitOps"](https://amralaayassen.medium.com/how-to-create-argocd-applications-automatically-using-applicationset-automation-of-the-gitops-59455eaf4f72)

22  SECURITY.md
@@ -1,6 +1,6 @@
# Security Policy for Argo CD

Version: **v1.4 (2022-01-23)**
Version: **v1.5 (2023-03-06)**

## Preface

@@ -41,7 +41,7 @@ previous to the most recent one (`N-1`, e.g. `1.7`). With the release of

We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
supported versions, which will contain fixes for security vulnerabilities and
important bugs. Prior releases might receive critical security fixes on a best
important bugs. Prior releases might receive critical security fixes on best
effort basis, however, it cannot be guaranteed that security fixes get
back-ported to these unsupported versions.

@@ -61,14 +61,28 @@ and disclosure with you. Sometimes, it might take a little longer for us to
react (e.g. out of office conditions), so please bear with us in these cases.

We will publish security advisories using the
[Git Hub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well informed, and will credit you for your
[GitHub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well-informed, and will credit you for your
findings (unless you prefer to stay anonymous, of course).

Please report vulnerabilities by e-mail to the following address:

* cncf-argo-security@lists.cncf.io

## Internet Bug Bounty collaboration

We're happy to announce that the Argo project is collaborating with the great
folks over at
[Hacker One](https://hackerone.com/) and their
[Internet Bug Bounty program](https://hackerone.com/ibb)
to reward the awesome people who find security vulnerabilities in the four
main Argo projects (CD, Events, Rollouts and Workflows) and then work with
us to fix and disclose them in a responsible manner.

If you report a vulnerability to us as outlined in this security policy, we
will work together with you to find out whether your finding is eligible for
claiming a bounty, and also on how to claim it.

## Securing your Argo CD Instance

See the [operator manual security page](docs/operator-manual/security.md) for

@@ -1,7 +1,7 @@
# Defined below are the security contacts for this repo.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities
# INSTRUCTIONS AT https://github.com/argoproj/argo-cd/security/policy

alexmt
edlee2121

20  USERS.md
@@ -11,6 +11,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
1. [Adfinis](https://adfinis.com)
|
||||
1. [Adventure](https://jp.adventurekk.com/)
|
||||
1. [Adyen](https://www.adyen.com)
|
||||
1. [AirQo](https://airqo.net/)
|
||||
1. [Akuity](https://akuity.io/)
|
||||
1. [Alibaba Group](https://www.alibabagroup.com/)
|
||||
@@ -31,6 +32,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
1. [BMW Group](https://www.bmwgroup.com/)
|
||||
1. [PT Boer Technology (Btech)](https://btech.id/)
|
||||
1. [Boozt](https://www.booztgroup.com/)
|
||||
1. [Boticario](https://www.boticario.com.br/)
|
||||
1. [Bulder Bank](https://bulderbank.no)
|
||||
@@ -44,7 +46,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Cloud Posse](https://www.cloudposse.com/)
|
||||
1. [Cloud Scale](https://cloudscaleinc.com/)
|
||||
1. [Cloudmate](https://cloudmt.co.kr/)
|
||||
1. [Cobalt](https://www.cobalt.io/)
|
||||
1. [Codefresh](https://www.codefresh.io/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
@@ -60,6 +64,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Deutsche Telekom AG](https://telekom.com)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
|
||||
1. [DigitalOcean](https://www.digitalocean.com)
|
||||
1. [Divistant](https://divistant.com)
|
||||
1. [Doximity](https://www.doximity.com/)
|
||||
1. [EDF Renewables](https://www.edf-re.com/)
|
||||
@@ -72,6 +77,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Energisme](https://energisme.com/)
|
||||
1. [enigmo](https://enigmo.co.jp/)
|
||||
1. [Envoy](https://envoy.com/)
|
||||
1. [Farfetch](https://www.farfetch.com)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Flip](https://flip.id)
|
||||
@@ -93,6 +99,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Gojek](https://www.gojek.io/)
|
||||
1. [Greenpass](https://www.greenpass.com.br/)
|
||||
1. [Gridfuse](https://gridfuse.com/)
|
||||
1. [Groww](https://groww.in)
|
||||
1. [Grupo MasMovil](https://grupomasmovil.com/en/)
|
||||
1. [Handelsbanken](https://www.handelsbanken.se)
|
||||
1. [Healy](https://www.healyworld.net)
|
||||
@@ -107,6 +114,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [imaware](https://imaware.health)
|
||||
1. [Indeed](https://indeed.com)
|
||||
1. [Index Exchange](https://www.indexexchange.com/)
|
||||
1. [Info Support](https://www.infosupport.com/)
|
||||
1. [InsideBoard](https://www.insideboard.com)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [Joblift](https://joblift.com/)
|
||||
@@ -128,6 +136,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Liatrio](https://www.liatrio.com)
|
||||
1. [Lightricks](https://www.lightricks.com/)
|
||||
1. [LINE](https://linecorp.com/en/)
|
||||
1. [Loom](https://www.loom.com/)
|
||||
1. [Lytt](https://www.lytt.co/)
|
||||
1. [Magic Leap](https://www.magicleap.com/)
|
||||
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
|
||||
@@ -153,15 +162,19 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Nextdoor](https://nextdoor.com/)
|
||||
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
|
||||
1. [Nitro](https://gonitro.com)
|
||||
1. [NYCU, CS IT Center](https://it.cs.nycu.edu.tw)
|
||||
1. [Objective](https://www.objective.com.br/)
|
||||
1. [OCCMundial](https://occ.com.mx)
|
||||
1. [Octadesk](https://octadesk.com)
|
||||
1. [omegaUp](https://omegaUp.com)
|
||||
1. [Omni](https://omni.se/)
|
||||
1. [openEuler](https://openeuler.org)
|
||||
1. [openGauss](https://opengauss.org/)
|
||||
1. [OpenGov](https://opengov.com)
|
||||
1. [openLooKeng](https://openlookeng.io)
|
||||
1. [OpenSaaS Studio](https://opensaas.studio)
|
||||
1. [Opensurvey](https://www.opensurvey.co.kr/)
|
||||
1. [OpsMx](https://opsmx.io)
|
||||
1. [OpsVerse](https://opsverse.io)
|
||||
1. [Optoro](https://www.optoro.com/)
|
||||
1. [Orbital Insight](https://orbitalinsight.com/)
|
||||
@@ -169,11 +182,13 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Packlink](https://www.packlink.com/)
|
||||
1. [Pandosearch](https://www.pandosearch.com/en/home)
|
||||
1. [PagerDuty](https://www.pagerduty.com/)
|
||||
1. [Patreon](https://www.patreon.com/)
|
||||
1. [PayPay](https://paypay.ne.jp/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
1. [Pigment](https://www.gopigment.com/)
|
||||
1. [Pipefy](https://www.pipefy.com/)
|
||||
1. [Pismo](https://pismo.io/)
|
||||
1. [Platform9 Systems](https://platform9.com/)
|
||||
1. [Polarpoint.io](https://polarpoint.io)
|
||||
1. [PostFinance](https://github.com/postfinance)
|
||||
1. [Preferred Networks](https://preferred.jp/en/)
|
||||
@@ -186,6 +201,8 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [RapidAPI](https://www.rapidapi.com/)
|
||||
1. [Recreation.gov](https://www.recreation.gov/)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [Redpill Linpro](https://www.redpill-linpro.com/)
|
||||
1. [Reenigne Cloud](https://reenigne.ca)
|
||||
1. [reev.com](https://www.reev.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
@@ -195,11 +212,13 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [Sauce Labs](https://saucelabs.com/)
|
||||
1. [Schwarz IT](https://jobs.schwarz/it-mission)
|
||||
1. [SI Analytics](https://si-analytics.ai)
|
||||
1. [Skit](https://skit.ai/)
|
||||
1. [Skyscanner](https://www.skyscanner.net/)
|
||||
1. [Smilee.io](https://smilee.io)
|
||||
1. [Smood.ch](https://www.smood.ch/)
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Snyk](https://snyk.io/)
|
||||
1. [Softway Medical](https://www.softwaymedical.fr/)
|
||||
@@ -236,6 +255,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [ungleich.ch](https://ungleich.ch/)
|
||||
1. [Unifonic Inc](https://www.unifonic.com/)
|
||||
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
|
||||
1. [Vectra](https://www.vectra.ai)
|
||||
1. [Viaduct](https://www.viaduct.ai/)
|
||||
1. [Vinted](https://vinted.com/)
|
||||
1. [Virtuo](https://www.govirtuo.com/)
|
||||
|
||||
@@ -17,6 +17,7 @@ package controllers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -29,9 +30,12 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
@@ -42,6 +46,8 @@ import (
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -53,7 +59,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
preservedAnnotations = []string{
|
||||
defaultPreservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
}
|
||||
@@ -71,7 +77,7 @@ type ApplicationSetReconciler struct {
|
||||
utils.Policy
|
||||
utils.Renderer
|
||||
|
||||
EnableProgressiveRollouts bool
|
||||
EnableProgressiveSyncs bool
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -142,7 +148,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
// appSyncMap tracks which apps will be synced during this reconciliation.
|
||||
appSyncMap := map[string]bool{}
|
||||
|
||||
if r.EnableProgressiveRollouts && applicationSetInfo.Spec.Strategy != nil {
|
||||
if r.EnableProgressiveSyncs && applicationSetInfo.Spec.Strategy != nil {
|
||||
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
@@ -152,9 +158,9 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
appMap[app.Name] = app
|
||||
}
|
||||
|
||||
appSyncMap, err = r.performProgressiveRollouts(ctx, applicationSetInfo, applications, desiredApplications, appMap)
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive rollouts reconciliation for application set: %w", err)
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,9 +192,9 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
)
|
||||
}
|
||||
|
||||
if r.EnableProgressiveRollouts {
|
||||
if r.EnableProgressiveSyncs {
|
||||
// trigger appropriate application syncs if RollingSync strategy is enabled
|
||||
if progressiveRolloutStrategyEnabled(&applicationSetInfo, "RollingSync") {
|
||||
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
|
||||
validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)
|
||||
|
||||
if err != nil {
|
||||
@@ -512,7 +518,7 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
|
||||
return res, applicationSetReason, firstError
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool) error {
|
||||
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
|
||||
// grab the job object, extract the owner...
|
||||
app := rawObj.(*argov1alpha1.Application)
|
||||
@@ -531,9 +537,11 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return fmt.Errorf("error setting up with manager: %w", err)
|
||||
}
|
||||
|
||||
ownsHandler := getOwnsHandlerPredicates(enableProgressiveSyncs)
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&argov1alpha1.ApplicationSet{}).
|
||||
Owns(&argov1alpha1.Application{}).
|
||||
Owns(&argov1alpha1.Application{}, builder.WithPredicates(ownsHandler)).
|
||||
Watches(
|
||||
&source.Kind{Type: &corev1.Secret{}},
|
||||
&clusterSecretEventHandler{
|
||||
@@ -563,7 +571,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
Namespace: generatedApp.Namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
}
|
||||
@@ -577,9 +585,15 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
			found.Operation = generatedApp.Operation
		}

		preservedAnnotations := make([]string, 0)
		if applicationSet.Spec.PreservedFields != nil {
			preservedAnnotations = append(preservedAnnotations, applicationSet.Spec.PreservedFields.Annotations...)
		}
		// Preserve specially treated argo cd annotations:
		// * https://github.com/argoproj/applicationset/issues/180
		// * https://github.com/argoproj/argo-cd/issues/10500
		preservedAnnotations = append(preservedAnnotations, defaultPreservedAnnotations...)

		for _, key := range preservedAnnotations {
			if state, exists := found.ObjectMeta.Annotations[key]; exists {
				if generatedApp.Annotations == nil {
@@ -775,24 +789,29 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
	return nil
}

func (r *ApplicationSetReconciler) performProgressiveRollouts(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {

	_, err := r.updateApplicationSetApplicationStatus(ctx, &appset, applications)
	if err != nil {
		return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
	}
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {

	appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
	if err != nil {
		return nil, fmt.Errorf("failed to build app dependency list: %w", err)
	}

	_, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
	if err != nil {
		return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
	}

	log.Infof("ApplicationSet %v step list:", appset.Name)
	for i, step := range appDependencyList {
		log.Infof("step %v: %+v", i+1, step)
	}

	appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
	if err != nil {
		return nil, fmt.Errorf("failed to build app sync map: %w", err)
	}

	log.Infof("appSyncMap: %+v", appSyncMap)
	log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)

	_, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
	if err != nil {
@@ -815,7 +834,7 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, a
	}

	steps := []argov1alpha1.ApplicationSetRolloutStep{}
	if progressiveRolloutStrategyEnabled(&applicationSet, "RollingSync") {
	if progressiveSyncsStrategyEnabled(&applicationSet, "RollingSync") {
		steps = applicationSet.Spec.Strategy.RollingSync.Steps
	}

@@ -941,7 +960,7 @@ func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicat

func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {

	if progressiveRolloutStrategyEnabled(appset, "RollingSync") {
	if progressiveSyncsStrategyEnabled(appset, "RollingSync") {
		// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
		return isApplicationHealthy(app) && appStatus.Status == "Healthy"
	}
@@ -949,7 +968,7 @@ func appSyncEnabledForNextStep(appset *argov1al
	return true
}

func progressiveRolloutStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool {
func progressiveSyncsStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool {
	if appset.Spec.Strategy == nil || appset.Spec.Strategy.Type != strategyType {
		return false
	}
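For orientation only (not part of this changeset; the helper name exampleRollingSyncSteps is hypothetical), the step list that progressiveSyncsStrategyEnabled gates and buildAppDependencyList consumes is a plain slice of ApplicationSetRolloutStep values. A minimal sketch, assuming the v1alpha1 types referenced in the hunks above:

package controllers

import (
	"k8s.io/apimachinery/pkg/util/intstr"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// exampleRollingSyncSteps is an illustrative, hypothetical helper. MaxUpdate caps how
// many Applications in a step may be moved to Pending at once; step selectors are
// omitted here for brevity.
func exampleRollingSyncSteps() []argov1alpha1.ApplicationSetRolloutStep {
	maxUpdate := intstr.FromInt(1)
	return []argov1alpha1.ApplicationSetRolloutStep{
		{MaxUpdate: &maxUpdate}, // step 1: sync one Application at a time
		{},                      // step 2: no cap, sync the remaining Applications together
	}
}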
@@ -982,7 +1001,7 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
}

// check the status of each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {

	now := metav1.Now()
	appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
@@ -993,22 +1012,24 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con

		idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)

		currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}

		if idx == -1 {
			// AppStatus not found, set default status of "Waiting"
			appStatuses = append(appStatuses, argov1alpha1.ApplicationSetApplicationStatus{
			currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
				Application: app.Name,
				LastTransitionTime: &now,
				Message: "No Application status found, defaulting status to Waiting.",
				Status: "Waiting",
			})
			break
				Step: fmt.Sprint(appStepMap[app.Name] + 1),
			}
		} else {
			// we have an existing AppStatus
			currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
		}

		// we have an existing AppStatus
		currentAppStatus := applicationSet.Status.ApplicationStatus[idx]

		appOutdated := false
		if progressiveRolloutStrategyEnabled(applicationSet, "RollingSync") {
		if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
			appOutdated = syncStatusString == "OutOfSync"
		}

@@ -1017,14 +1038,22 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
			currentAppStatus.LastTransitionTime = &now
			currentAppStatus.Status = "Waiting"
			currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
			currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
		}

		if currentAppStatus.Status == "Pending" {
			if healthStatusString == "Progressing" || operationPhaseString == "Running" {
			if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
				log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
				currentAppStatus.LastTransitionTime = &now
				currentAppStatus.Status = "Progressing"
				currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
				currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
			} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
				log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
				currentAppStatus.LastTransitionTime = &now
				currentAppStatus.Status = "Progressing"
				currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
				currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
			}
		}

@@ -1033,6 +1062,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
|
||||
@@ -1040,6 +1070,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
}
|
||||
|
||||
appStatuses = append(appStatuses, currentAppStatus)
|
||||
@@ -1065,7 +1096,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
totalCountMap := []int{}
|
||||
|
||||
length := 0
|
||||
if progressiveRolloutStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
|
||||
}
|
||||
for s := 0; s < length; s++ {
|
||||
@@ -1077,7 +1108,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
totalCountMap[appStepMap[appStatus.Application]] += 1
|
||||
|
||||
if progressiveRolloutStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
@@ -1088,7 +1119,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
|
||||
maxUpdateAllowed := true
|
||||
maxUpdate := &intstr.IntOrString{}
|
||||
if progressiveRolloutStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
maxUpdate = applicationSet.Spec.Strategy.RollingSync.Steps[appStepMap[appStatus.Application]].MaxUpdate
|
||||
}
|
||||
|
||||
@@ -1116,6 +1147,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
appStatus.LastTransitionTime = &now
|
||||
appStatus.Status = "Pending"
|
||||
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
|
||||
appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)
|
||||
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
@@ -1263,7 +1295,7 @@ func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, ap
	return rolloutApps, nil
}

// used by the RollingSync Progressive Rollout strategy to trigger a sync of a particular Application resource
// used by the RollingSync Progressive Sync strategy to trigger a sync of a particular Application resource
func syncApplication(application argov1alpha1.Application, prune bool) (argov1alpha1.Application, error) {

	operation := argov1alpha1.Operation{
@@ -1294,4 +1326,73 @@ func syncApplication(application argov1alpha1.Application, prune bool) (argov1al
	return application, nil
}

func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
	return predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			// if we are the owner and there is a create event, we most likely created it and do not need to
			// re-reconcile
			log.Debugln("received create event from owning an application")
			return false
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			log.Debugln("received delete event from owning an application")
			return true
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			log.Debugln("received update event from owning an application")
			appOld, isApp := e.ObjectOld.(*argov1alpha1.Application)
			if !isApp {
				return false
			}
			appNew, isApp := e.ObjectNew.(*argov1alpha1.Application)
			if !isApp {
				return false
			}
			requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
			log.Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
			return requeue
		},
		GenericFunc: func(e event.GenericEvent) bool {
			log.Debugln("received generic event from owning an application")
			return true
		},
	}
}

// shouldRequeueApplicationSet determines when we want to requeue an ApplicationSet for reconciling based on an owned
// application change
// The applicationset controller owns a subset of the Application CR.
// We do not need to re-reconcile if parts of the application change outside the applicationset's control.
// An example being, Application.ApplicationStatus.ReconciledAt which gets updated by the application controller.
// Additionally, Application.ObjectMeta.ResourceVersion and Application.ObjectMeta.Generation which are set by K8s.
func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov1alpha1.Application, enableProgressiveSyncs bool) bool {
	if appOld == nil || appNew == nil {
		return false
	}

	// the applicationset controller owns the application spec, labels, annotations, and finalizers on the applications
	if !reflect.DeepEqual(appOld.Spec, appNew.Spec) ||
		!reflect.DeepEqual(appOld.ObjectMeta.GetAnnotations(), appNew.ObjectMeta.GetAnnotations()) ||
		!reflect.DeepEqual(appOld.ObjectMeta.GetLabels(), appNew.ObjectMeta.GetLabels()) ||
		!reflect.DeepEqual(appOld.ObjectMeta.GetFinalizers(), appNew.ObjectMeta.GetFinalizers()) {
		return true
	}

	// progressive syncs use the application status for updates. if they differ, requeue to trigger the next progression
	if enableProgressiveSyncs {
		if appOld.Status.Health.Status != appNew.Status.Health.Status || appOld.Status.Sync.Status != appNew.Status.Sync.Status {
			return true
		}

		if appOld.Status.OperationState != nil && appNew.Status.OperationState != nil {
			if appOld.Status.OperationState.Phase != appNew.Status.OperationState.Phase ||
				appOld.Status.OperationState.StartedAt != appNew.Status.OperationState.StartedAt {
				return true
			}
		}
	}

	return false
}

var _ handler.EventHandler = &clusterSecretEventHandler{}

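The predicate above requeues only on fields the ApplicationSet controller owns, plus health/sync/operation status when progressive syncs are enabled. A minimal test-style sketch of that behaviour (illustrative only, not part of the diff; the test name is hypothetical):

package controllers

import (
	"testing"

	"github.com/argoproj/gitops-engine/pkg/health"
	"github.com/stretchr/testify/assert"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func TestRequeueOnlyOnOwnedOrProgressiveFields(t *testing.T) {
	appOld := &argov1alpha1.Application{}
	appOld.Status.Health.Status = health.HealthStatusProgressing

	appNew := appOld.DeepCopy()
	appNew.Status.Health.Status = health.HealthStatusHealthy

	// Spec, labels, annotations and finalizers are identical, so a health change
	// only matters when progressive syncs are enabled.
	assert.False(t, shouldRequeueApplicationSet(appOld, appNew, false))
	assert.True(t, shouldRequeueApplicationSet(appOld, appNew, true))
}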
File diff suppressed because it is too large
@@ -2,6 +2,7 @@ package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
@@ -46,7 +47,6 @@ type addRateLimitingInterface interface {
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {
|
||||
|
||||
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
|
||||
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
|
||||
return
|
||||
@@ -73,6 +73,40 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingI
|
||||
foundClusterGenerator = true
|
||||
break
|
||||
}
|
||||
|
||||
if generator.Matrix != nil {
|
||||
ok, err := nestedGeneratorsHaveClusterGenerator(generator.Matrix.Generators)
|
||||
if err != nil {
|
||||
h.Log.
|
||||
WithFields(log.Fields{
|
||||
"namespace": appSet.GetNamespace(),
|
||||
"name": appSet.GetName(),
|
||||
}).
|
||||
WithError(err).
|
||||
Error("Unable to check if ApplicationSet matrix generators have cluster generator")
|
||||
}
|
||||
if ok {
|
||||
foundClusterGenerator = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if generator.Merge != nil {
|
||||
ok, err := nestedGeneratorsHaveClusterGenerator(generator.Merge.Generators)
|
||||
if err != nil {
|
||||
h.Log.
|
||||
WithFields(log.Fields{
|
||||
"namespace": appSet.GetNamespace(),
|
||||
"name": appSet.GetName(),
|
||||
}).
|
||||
WithError(err).
|
||||
Error("Unable to check if ApplicationSet merge generators have cluster generator")
|
||||
}
|
||||
if ok {
|
||||
foundClusterGenerator = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if foundClusterGenerator {
|
||||
|
||||
@@ -82,3 +116,42 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingI
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// nestedGeneratorsHaveClusterGenerator iterate over provided nested generators to check if they have a cluster generator.
|
||||
func nestedGeneratorsHaveClusterGenerator(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator) (bool, error) {
|
||||
for _, generator := range generators {
|
||||
if ok, err := nestedGeneratorHasClusterGenerator(generator); ok || err != nil {
|
||||
return ok, err
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// nestedGeneratorHasClusterGenerator checks if the provided generator has a cluster generator.
|
||||
func nestedGeneratorHasClusterGenerator(nested argoprojiov1alpha1.ApplicationSetNestedGenerator) (bool, error) {
|
||||
if nested.Clusters != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if nested.Matrix != nil {
|
||||
nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(nested.Matrix)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("unable to get nested matrix generator: %w", err)
|
||||
}
|
||||
if nestedMatrix != nil {
|
||||
return nestedGeneratorsHaveClusterGenerator(nestedMatrix.ToMatrixGenerator().Generators)
|
||||
}
|
||||
}
|
||||
|
||||
if nested.Merge != nil {
|
||||
nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(nested.Merge)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("unable to get nested merge generator: %w", err)
|
||||
}
|
||||
if nestedMerge != nil {
|
||||
return nestedGeneratorsHaveClusterGenerator(nestedMerge.ToMergeGenerator().Generators)
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
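For context (illustrative only, not part of the diff; the function name is hypothetical): a nested matrix generator arrives as raw JSON in the CRD, so the check has to unmarshal it before it can see the cluster generator inside, exactly as nestedGeneratorHasClusterGenerator does above.

package controllers

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// exampleNestedMatrixCheck is an illustrative, hypothetical helper showing the
// nested-matrix case handled by nestedGeneratorHasClusterGenerator.
func exampleNestedMatrixCheck() {
	nested := argoprojiov1alpha1.ApplicationSetNestedGenerator{
		Matrix: &apiextensionsv1.JSON{
			Raw: []byte(`{"generators":[{"clusters":{}}]}`),
		},
	}
	ok, err := nestedGeneratorHasClusterGenerator(nested)
	fmt.Println(ok, err) // true <nil>
}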
@@ -6,6 +6,7 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -163,7 +164,6 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
@@ -189,6 +189,348 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "a matrix generator with a cluster generator should produce a request",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Matrix: &argov1alpha1.MatrixGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{{
|
||||
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "a matrix generator with non cluster generator should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Matrix: &argov1alpha1.MatrixGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argov1alpha1.ListGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "a matrix generator with a nested matrix generator containing a cluster generator should produce a request",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Matrix: &argov1alpha1.MatrixGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Matrix: &apiextensionsv1.JSON{
|
||||
Raw: []byte(
|
||||
`{
|
||||
"generators": [
|
||||
{
|
||||
"clusters": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"argocd.argoproj.io/secret-type": "cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`,
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{{
|
||||
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "a matrix generator with a nested matrix generator containing non cluster generator should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Matrix: &argov1alpha1.MatrixGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Matrix: &apiextensionsv1.JSON{
|
||||
Raw: []byte(
|
||||
`{
|
||||
"generators": [
|
||||
{
|
||||
"list": {
|
||||
"elements": [
|
||||
"a",
|
||||
"b"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}`,
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "a merge generator with a cluster generator should produce a request",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Merge: &argov1alpha1.MergeGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{{
|
||||
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "a matrix generator with non cluster generator should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Merge: &argov1alpha1.MergeGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argov1alpha1.ListGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "a merge generator with a nested merge generator containing a cluster generator should produce a request",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Merge: &argov1alpha1.MergeGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Merge: &apiextensionsv1.JSON{
|
||||
Raw: []byte(
|
||||
`{
|
||||
"generators": [
|
||||
{
|
||||
"clusters": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"argocd.argoproj.io/secret-type": "cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}`,
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{{
|
||||
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "a merge generator with a nested merge generator containing non cluster generator should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Merge: &argov1alpha1.MergeGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Merge: &apiextensionsv1.JSON{
|
||||
Raw: []byte(
|
||||
`{
|
||||
"generators": [
|
||||
{
|
||||
"list": {
|
||||
"elements": [
|
||||
"a",
|
||||
"b"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}`,
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
applicationset/controllers/requeue_after_test.go (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
func TestRequeueAfter(t *testing.T) {
|
||||
mockServer := argoCDServiceMock{}
|
||||
ctx := context.Background()
|
||||
scheme := runtime.NewScheme()
|
||||
err := argov1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
gvrToListKind := map[schema.GroupVersionResource]string{{
|
||||
Group: "mallard.io",
|
||||
Version: "v1",
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
appClientset := kubefake.NewSimpleClientset()
|
||||
k8sClient := fake.NewClientBuilder().Build()
|
||||
duckType := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v2quack",
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "mightyduck",
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "all-species"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "staging-01",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
|
||||
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}),
|
||||
}
|
||||
|
||||
nestedGenerators := map[string]generators.Generator{
|
||||
"List": terminalGenerators["List"],
|
||||
"Clusters": terminalGenerators["Clusters"],
|
||||
"Git": terminalGenerators["Git"],
|
||||
"SCMProvider": terminalGenerators["SCMProvider"],
|
||||
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
|
||||
"PullRequest": terminalGenerators["PullRequest"],
|
||||
"Matrix": generators.NewMatrixGenerator(terminalGenerators),
|
||||
"Merge": generators.NewMergeGenerator(terminalGenerators),
|
||||
}
|
||||
|
||||
topLevelGenerators := map[string]generators.Generator{
|
||||
"List": terminalGenerators["List"],
|
||||
"Clusters": terminalGenerators["Clusters"],
|
||||
"Git": terminalGenerators["Git"],
|
||||
"SCMProvider": terminalGenerators["SCMProvider"],
|
||||
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
|
||||
"PullRequest": terminalGenerators["PullRequest"],
|
||||
"Matrix": generators.NewMatrixGenerator(nestedGenerators),
|
||||
"Merge": generators.NewMergeGenerator(nestedGenerators),
|
||||
}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(0),
|
||||
Generators: topLevelGenerators,
|
||||
}
|
||||
|
||||
type args struct {
|
||||
appset *argov1alpha1.ApplicationSet
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want time.Duration
|
||||
wantErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{name: "Cluster", args: args{appset: &argov1alpha1.ApplicationSet{
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}},
|
||||
},
|
||||
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
|
||||
{name: "ClusterMergeNested", args: args{&argov1alpha1.ApplicationSet{
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{Clusters: &argov1alpha1.ClusterGenerator{}},
|
||||
{Merge: &argov1alpha1.MergeGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
Git: &argov1alpha1.GitGenerator{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
|
||||
{name: "ClusterMatrixNested", args: args{&argov1alpha1.ApplicationSet{
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{Clusters: &argov1alpha1.ClusterGenerator{}},
|
||||
{Matrix: &argov1alpha1.MatrixGenerator{
|
||||
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
Git: &argov1alpha1.GitGenerator{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
|
||||
{name: "ListGenerator", args: args{appset: &argov1alpha1.ApplicationSet{
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{{List: &argov1alpha1.ListGenerator{}}},
|
||||
},
|
||||
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
@@ -0,0 +1,14 @@
key:
  components:
    - name: component1
      chart: podinfo
      version: "6.3.2"
      releaseName: component1
      repoUrl: "https://stefanprodan.github.io/podinfo"
      namespace: component1
    - name: component2
      chart: podinfo
      version: "6.3.3"
      releaseName: component2
      repoUrl: "ghcr.io/stefanprodan/charts"
      namespace: component2
@@ -51,6 +51,8 @@ func NewClusterGenerator(c client.Client, ctx context.Context, clientset kuberne
	return g
}

// GetRequeueAfter never requeue the cluster generator because the `clusterSecretEventHandler` will requeue the appsets
// when the cluster secrets change
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
	return NoRequeueAfter
}

@@ -2,7 +2,6 @@ package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
@@ -25,7 +24,7 @@ type TransformResult struct {
|
||||
Template argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
//Transform a spec generator to list of paramSets and a template
|
||||
// Transform a spec generator to list of paramSets and a template
|
||||
func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet, genParams map[string]interface{}) ([]TransformResult, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(requestedGenerator.Selector)
|
||||
if err != nil {
|
||||
@@ -132,27 +131,15 @@ func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.
|
||||
return *dest, err
|
||||
}
|
||||
|
||||
// Currently for Matrix Generator. Allows interpolating the matrix's 2nd child generator with values from the 1st child generator
|
||||
// InterpolateGenerator allows interpolating the matrix's 2nd child generator with values from the 1st child generator
|
||||
// "params" parameter is an array, where each index corresponds to a generator. Each index contains a map w/ that generator's parameters.
|
||||
func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool) (argoprojiov1alpha1.ApplicationSetGenerator, error) {
|
||||
interpolatedGenerator := requestedGenerator.DeepCopy()
|
||||
tmplBytes, err := json.Marshal(interpolatedGenerator)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", interpolatedGenerator).Error("error marshalling requested generator for interpolation")
|
||||
return *interpolatedGenerator, err
|
||||
}
|
||||
|
||||
render := utils.Render{}
|
||||
replacedTmplStr, err := render.Replace(string(tmplBytes), params, useGoTemplate)
|
||||
interpolatedGenerator, err := render.RenderGeneratorParams(requestedGenerator, params, useGoTemplate)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("interpolatedGeneratorString", replacedTmplStr).Error("error interpolating generator with other generator's parameter")
|
||||
log.WithError(err).WithField("interpolatedGenerator", interpolatedGenerator).Error("error interpolating generator with other generator's parameter")
|
||||
return *interpolatedGenerator, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(replacedTmplStr), interpolatedGenerator)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", interpolatedGenerator).Error("error unmarshalling requested generator for interpolation")
|
||||
return *interpolatedGenerator, err
|
||||
}
|
||||
return *interpolatedGenerator, nil
|
||||
}
|
||||
|
||||
@@ -6,9 +6,11 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
@@ -159,8 +161,8 @@ func getMockClusterGenerator() Generator {
|
||||
}
|
||||
|
||||
func getMockGitGenerator() Generator {
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
return gitGenerator
|
||||
}
|
||||
@@ -248,6 +250,60 @@ func TestInterpolateGenerator(t *testing.T) {
|
||||
Path: "{{server}}",
|
||||
}
|
||||
|
||||
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
Files: append([]argoprojiov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}
|
||||
clusterGeneratorParams := map[string]interface{}{
|
||||
"name": "production_01/west", "server": "https://production-01.example.com",
|
||||
}
|
||||
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, false)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
|
||||
return
|
||||
}
|
||||
assert.Equal(t, "production_01/west", interpolatedGenerator.Git.Files[0].Path)
|
||||
assert.Equal(t, "https://production-01.example.com", interpolatedGenerator.Git.Files[1].Path)
|
||||
}
|
||||
|
||||
func TestInterpolateGenerator_go(t *testing.T) {
|
||||
requestedGenerator := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{
|
||||
Selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"path-basename": "{{base .path.path}}",
|
||||
"path-zero": "{{index .path.segments 0}}",
|
||||
"path-full": "{{.path.path}}",
|
||||
"kubernetes.io/environment": `{{default "foo" .my_label}}`,
|
||||
}},
|
||||
},
|
||||
}
|
||||
gitGeneratorParams := map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "p1/p2/app3",
|
||||
"segments": []string{"p1", "p2", "app3"},
|
||||
},
|
||||
}
|
||||
interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, true)
|
||||
require.NoError(t, err)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
|
||||
return
|
||||
}
|
||||
assert.Equal(t, "app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-basename"])
|
||||
assert.Equal(t, "p1", interpolatedGenerator.Clusters.Selector.MatchLabels["path-zero"])
|
||||
assert.Equal(t, "p1/p2/app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-full"])
|
||||
|
||||
fileNamePath := argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
Path: "{{.name}}",
|
||||
}
|
||||
fileServerPath := argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
Path: "{{.server}}",
|
||||
}
|
||||
|
||||
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
Files: append([]argoprojiov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
|
||||
|
||||
@@ -58,9 +58,9 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
|
||||
|
||||
var err error
|
||||
var res []map[string]interface{}
|
||||
if appSetGenerator.Git.Directories != nil {
|
||||
if len(appSetGenerator.Git.Directories) != 0 {
|
||||
res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate)
|
||||
} else if appSetGenerator.Git.Files != nil {
|
||||
} else if len(appSetGenerator.Git.Files) != 0 {
|
||||
res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate)
|
||||
} else {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
@@ -81,10 +81,10 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"allPaths": allPaths,
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
"allPaths": allPaths,
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
"pathParamPrefix": appSetGenerator.Git.PathParamPrefix,
|
||||
}).Info("applications result from the repo service")
|
||||
|
||||
@@ -127,9 +127,7 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
|
||||
}
|
||||
|
||||
for index := range paramsArray {
|
||||
res = append(res, paramsArray[index])
|
||||
}
|
||||
res = append(res, paramsArray...)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -183,7 +181,7 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
}
|
||||
pathParamName := "path"
|
||||
if pathParamPrefix != "" {
|
||||
pathParamName = pathParamPrefix+"."+pathParamName
|
||||
pathParamName = pathParamPrefix + "." + pathParamName
|
||||
}
|
||||
params[pathParamName] = path.Dir(filePath)
|
||||
params[pathParamName+".basename"] = path.Base(params[pathParamName].(string))
|
||||
@@ -251,7 +249,7 @@ func (g *GitGenerator) generateParamsFromApps(requestedApps []string, appSetGene
|
||||
} else {
|
||||
pathParamName := "path"
|
||||
if appSetGenerator.Git.PathParamPrefix != "" {
|
||||
pathParamName = appSetGenerator.Git.PathParamPrefix+"."+pathParamName
|
||||
pathParamName = appSetGenerator.Git.PathParamPrefix + "." + pathParamName
|
||||
}
|
||||
params[pathParamName] = a
|
||||
params[pathParamName+".basename"] = path.Base(a)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
@@ -9,6 +8,7 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -20,33 +20,6 @@ import (
|
||||
// return io.NewCloser(func() error { return nil }), c.RepoServerServiceClient, nil
|
||||
// }
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func Test_generateParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
@@ -271,9 +244,9 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
@@ -301,7 +274,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -566,9 +539,9 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
@@ -597,7 +570,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -857,8 +830,8 @@ cluster:
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
argoCDServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
@@ -887,7 +860,7 @@ cluster:
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1206,8 +1179,8 @@ cluster:
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
argoCDServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
@@ -1237,7 +1210,7 @@ cluster:
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
	"time"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"sigs.k8s.io/yaml"
)

var _ Generator = (*ListGenerator)(nil)
@@ -73,5 +74,16 @@ func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Appli
		}
	}

	// Append elements from ElementsYaml to the response
	if len(appSetGenerator.List.ElementsYaml) > 0 {

		var yamlElements []map[string]interface{}
		err := yaml.Unmarshal([]byte(appSetGenerator.List.ElementsYaml), &yamlElements)
		if err != nil {
			return nil, fmt.Errorf("error unmarshling decoded ElementsYaml %v", err)
		}
		res = append(res, yamlElements...)
	}

	return res, nil
}

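For reference (illustrative only, not part of the diff): ElementsYaml is expected to hold a YAML list of maps, and sigs.k8s.io/yaml unmarshals it into []map[string]interface{} exactly as the generator does above. A self-contained sketch:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	// A rendered ElementsYaml value of the kind used by the podinfo test fixture above.
	elementsYaml := `
- chart: podinfo
  version: "6.3.2"
- chart: podinfo
  version: "6.3.3"
`
	var yamlElements []map[string]interface{}
	if err := yaml.Unmarshal([]byte(elementsYaml), &yamlElements); err != nil {
		panic(err)
	}
	// Each list entry becomes one parameter set for the generator.
	fmt.Println(yamlElements)
}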
@@ -50,10 +50,17 @@ func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
requiresInterpolation := false // Try to generate 2nd generator's params without interpolation
|
||||
g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet, nil)
|
||||
if err != nil || g1 == nil {
|
||||
requiresInterpolation = true
|
||||
}
|
||||
for _, a := range g0 {
|
||||
g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet, a)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get params for second generator in the matrix generator: %w", err)
|
||||
if requiresInterpolation {
|
||||
g1, err = m.getParams(appSetGenerator.Matrix.Generators[1], appSet, a)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get params for second generator in the matrix generator: %w", err)
|
||||
}
|
||||
}
|
||||
for _, b := range g1 {
|
||||
|
||||
@@ -80,28 +87,13 @@ func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet, params map[string]interface{}) ([]map[string]interface{}, error) {
|
||||
var matrix *argoprojiov1alpha1.MatrixGenerator
|
||||
if appSetBaseGenerator.Matrix != nil {
|
||||
// Since nested matrix generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here.
|
||||
nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(appSetBaseGenerator.Matrix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unmarshall nested matrix generator: %v", err)
|
||||
}
|
||||
if nestedMatrix != nil {
|
||||
matrix = nestedMatrix.ToMatrixGenerator()
|
||||
}
|
||||
matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mergeGenerator *argoprojiov1alpha1.MergeGenerator
|
||||
if appSetBaseGenerator.Merge != nil {
|
||||
// Since nested merge generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here.
|
||||
nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(appSetBaseGenerator.Merge)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unmarshall nested merge generator: %v", err)
|
||||
}
|
||||
if nestedMerge != nil {
|
||||
mergeGenerator = nestedMerge.ToMergeGenerator()
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
@@ -112,8 +104,8 @@ func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Appli
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrix,
|
||||
Merge: mergeGenerator,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
Selector: appSetBaseGenerator.Selector,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
@@ -143,10 +135,15 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Matrix.Generators {
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
@@ -167,6 +164,17 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
|
||||
|
||||
}
|
||||
|
||||
func getMatrixGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MatrixGenerator, error) {
|
||||
if r.Matrix == nil {
|
||||
return nil, nil
|
||||
}
|
||||
matrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(r.Matrix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return matrix.ToMatrixGenerator(), nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Matrix.Template
|
||||
}
|
||||
|
||||
@@ -2,9 +2,11 @@ package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/sirupsen/logrus"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -16,6 +18,7 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -399,6 +402,8 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
pullRequestGenerator := &argoprojiov1alpha1.PullRequestGenerator{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
@@ -431,6 +436,31 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
gitGetRequeueAfter: time.Duration(1),
|
||||
expected: time.Duration(1),
|
||||
},
|
||||
{
|
||||
name: "returns the minimal time for pull request",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: time.Duration(15 * time.Second),
|
||||
expected: time.Duration(15 * time.Second),
|
||||
},
|
||||
{
|
||||
name: "returns the default time if no requeueAfterSeconds is provided",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
expected: time.Duration(30 * time.Minute),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -441,16 +471,18 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
PullRequest: g.PullRequest,
|
||||
}
|
||||
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"PullRequest": &PullRequestGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -806,6 +838,441 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatrixGenerateListElementsYaml(t *testing.T) {
|
||||
|
||||
gitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Files: []argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
{Path: "config.yaml"},
|
||||
},
|
||||
}
|
||||
|
||||
listGenerator := &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{},
|
||||
ElementsYaml: "{{ .foo.bar | toJson }}",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
expectedErr error
|
||||
expected []map[string]interface{}
|
||||
}{
|
||||
{
|
||||
name: "happy flow - generate params",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"chart": "a",
|
||||
"version": "1",
|
||||
"foo": map[string]interface{}{
|
||||
"bar": []interface{}{
|
||||
map[string]interface{}{
|
||||
"chart": "a",
|
||||
"version": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"chart": "b",
|
||||
"version": "2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"path": map[string]interface{}{
|
||||
"basename": "dir",
|
||||
"basenameNormalized": "dir",
|
||||
"filename": "file_name.yaml",
|
||||
"filenameNormalized": "file-name.yaml",
|
||||
"path": "path/dir",
|
||||
"segments": []string {
|
||||
"path",
|
||||
"dir",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"chart": "b",
|
||||
"version": "2",
|
||||
"foo": map[string]interface{}{
|
||||
"bar": []interface{}{
|
||||
map[string]interface{}{
|
||||
"chart": "a",
|
||||
"version": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"chart": "b",
|
||||
"version": "2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"path": map[string]interface{}{
|
||||
"basename": "dir",
|
||||
"basenameNormalized": "dir",
|
||||
"filename": "file_name.yaml",
|
||||
"filenameNormalized": "file-name.yaml",
|
||||
"path": "path/dir",
|
||||
"segments": []string {
|
||||
"path",
|
||||
"dir",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
genMock := &generatorMock{}
|
||||
appSet := &argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
}
|
||||
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return([]map[string]any{{
|
||||
"foo": map[string]interface{}{
|
||||
"bar": []interface{}{
|
||||
map[string]interface{}{
|
||||
"chart": "a",
|
||||
"version": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"chart": "b",
|
||||
"version": "2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"path": map[string]interface{}{
|
||||
"basename": "dir",
|
||||
"basenameNormalized": "dir",
|
||||
"filename": "file_name.yaml",
|
||||
"filenameNormalized": "file-name.yaml",
|
||||
"path": "path/dir",
|
||||
"segments": []string {
|
||||
"path",
|
||||
"dir",
|
||||
},
|
||||
},
|
||||
|
||||
}}, nil)
|
||||
genMock.On("GetTemplate", &gitGeneratorSpec).
|
||||
Return(&argoprojiov1alpha1.ApplicationSetTemplate{})
|
||||
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": genMock,
|
||||
"List": &ListGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}, appSet)
|
||||
|
||||
if testCaseCopy.expectedErr != nil {
|
||||
assert.ErrorIs(t, err, testCaseCopy.expectedErr)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
}
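For context on the ElementsYaml case exercised above: the `{{ .foo.bar | toJson }}` template is rendered against the parameters coming from the other child generator, and the rendered document is decoded back into the per-element maps the list generator iterates over. A rough standalone sketch of that flow, assuming a hand-rolled toJson template function rather than whatever the real list generator registers:

```go
// Standalone sketch of rendering elementsYaml against parent params and
// decoding the result into list elements. The funcMap wiring is an assumption.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"text/template"
)

func main() {
	params := map[string]interface{}{
		"foo": map[string]interface{}{
			"bar": []interface{}{
				map[string]interface{}{"chart": "a", "version": "1"},
				map[string]interface{}{"chart": "b", "version": "2"},
			},
		},
	}

	funcs := template.FuncMap{
		"toJson": func(v interface{}) (string, error) {
			b, err := json.Marshal(v)
			return string(b), err
		},
	}
	tmpl := template.Must(template.New("elementsYaml").Funcs(funcs).Parse("{{ .foo.bar | toJson }}"))

	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, params); err != nil {
		panic(err)
	}

	// JSON is a subset of YAML, so the rendered output can be decoded straight
	// into the per-element maps the list generator iterates over.
	var elements []map[string]interface{}
	if err := json.Unmarshal(buf.Bytes(), &elements); err != nil {
		panic(err)
	}
	fmt.Println(len(elements), elements[0]["chart"]) // 2 a
}
```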
|
||||
|
||||
func TestSkipInterpolatedMatrixCalls(t *testing.T) {
|
||||
interpolatedGitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Files: []argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
{Path: "examples/git-generator-files-discovery/cluster-config/{{name}}.json"},
|
||||
},
|
||||
}
|
||||
|
||||
nonInterpolatedGitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Files: []argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
{Path: "examples/git-generator-files-discovery/cluster-config/base.json"},
|
||||
},
|
||||
}
|
||||
|
||||
interpolatedClusterGenerator := &argoprojiov1alpha1.ClusterGenerator{
|
||||
Selector: metav1.LabelSelector{
|
||||
MatchLabels: nil,
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "Exists",
|
||||
Values: []string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
expectedErr error
|
||||
expected []map[string]interface{}
|
||||
clientError bool
|
||||
expectedMock [][]map[string]interface{}
|
||||
expectedNumCalls int
|
||||
}{
|
||||
{
|
||||
name: "happy flow - generate noninterpolated params",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{ Clusters: interpolatedClusterGenerator },
|
||||
{ Git: nonInterpolatedGitGenerator },
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/base.json",
|
||||
"path.basename": "base",
|
||||
"path.basenameNormalized": "base",
|
||||
"path.filename": "base.json",
|
||||
"path.filenameNormalized": "base.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "base",
|
||||
"name": "dev-01",
|
||||
"nameNormalized": "dev-01",
|
||||
"server": "https://dev-01.example.com",
|
||||
"metadata.labels.environment": "dev",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/base.json",
|
||||
"path.basename": "base",
|
||||
"path.basenameNormalized": "base",
|
||||
"path.filename": "base.json",
|
||||
"path.filenameNormalized": "base.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "base",
|
||||
"name": "prod-01",
|
||||
"nameNormalized": "prod-01",
|
||||
"server": "https://prod-01.example.com",
|
||||
"metadata.labels.environment": "prod",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
},
|
||||
clientError: false,
|
||||
expectedMock: [][]map[string]interface{}{
|
||||
{
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/base.json",
|
||||
"path.basename": "base",
|
||||
"path.basenameNormalized": "base",
|
||||
"path.filename": "base.json",
|
||||
"path.filenameNormalized": "base.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "base",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedNumCalls: 1,
|
||||
},
|
||||
{
|
||||
name: "happy flow - generate interpolated params",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{ Clusters: interpolatedClusterGenerator },
|
||||
{ Git: interpolatedGitGenerator },
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/dev-01.json",
|
||||
"path.basename": "dev-01",
|
||||
"path.basenameNormalized": "dev-01",
|
||||
"path.filename": "dev-01.json",
|
||||
"path.filenameNormalized": "dev-01.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "dev-01",
|
||||
"name": "dev-01",
|
||||
"nameNormalized": "dev-01",
|
||||
"server": "https://dev-01.example.com",
|
||||
"metadata.labels.environment": "dev",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/prod-01.json",
|
||||
"path.basename": "prod-01",
|
||||
"path.basenameNormalized": "prod-01",
|
||||
"path.filename": "prod-01.json",
|
||||
"path.filenameNormalized": "prod-01.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "prod-01",
|
||||
"name": "prod-01",
|
||||
"nameNormalized": "prod-01",
|
||||
"server": "https://prod-01.example.com",
|
||||
"metadata.labels.environment": "prod",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
},
|
||||
clientError: false,
|
||||
expectedMock: [][]map[string]interface{}{
|
||||
{}, // Needed, because the 1st call will fail without interpolation
|
||||
{ // Subsequent calls will succeed with interpolation
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/dev-01.json",
|
||||
"path.basename": "dev-01",
|
||||
"path.basenameNormalized": "dev-01",
|
||||
"path.filename": "dev-01.json",
|
||||
"path.filenameNormalized": "dev-01.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "dev-01",
|
||||
},
|
||||
},
|
||||
{
|
||||
{
|
||||
"path": "examples/git-generator-files-discovery/prod-01.json",
|
||||
"path.basename": "prod-01",
|
||||
"path.basenameNormalized": "prod-01",
|
||||
"path.filename": "prod-01.json",
|
||||
"path.filenameNormalized": "prod-01.json",
|
||||
"path[0]": "examples",
|
||||
"path[1]": "git-generator-files-discovery",
|
||||
"path[2]": "prod-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedNumCalls: 3,
|
||||
},
|
||||
}
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "dev-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "dev",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("dev-01"),
|
||||
"server": []byte("https://dev-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "prod-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "prod",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("prod-01"),
|
||||
"server": []byte("https://prod-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
genMock := &generatorMock{}
|
||||
appSet := &argoprojiov1alpha1.ApplicationSet{}
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace")
|
||||
logrus.Debug(clusterGenerator)
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Clusters: g.Clusters,
|
||||
Git: g.Git,
|
||||
}
|
||||
|
||||
genMock.On("GetTemplate", &gitGeneratorSpec).
|
||||
Return(&argoprojiov1alpha1.ApplicationSetTemplate{})
|
||||
}
|
||||
|
||||
for _, currMock := range testCaseCopy.expectedMock {
|
||||
genMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), appSet).Return(currMock, nil).Once()
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": genMock,
|
||||
"Clusters": clusterGenerator,
|
||||
},
|
||||
)
|
||||
|
||||
got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}, appSet)
|
||||
|
||||
genMock.AssertNumberOfCalls(t, "GenerateParams", testCase.expectedNumCalls)
|
||||
|
||||
if testCaseCopy.expectedErr != nil {
|
||||
assert.ErrorIs(t, err, testCaseCopy.expectedErr)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type generatorMock struct {
|
||||
mock.Mock
|
||||
}
|
||||
@@ -828,3 +1295,72 @@ func (g *generatorMock) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Appl
|
||||
return args.Get(0).(time.Duration)
|
||||
|
||||
}
|
||||
|
||||
func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
|
||||
// Given a matrix generator over a list generator and a git files generator, the nested git files generator should
|
||||
// be treated as a files generator, and it should produce parameters.
|
||||
|
||||
// This tests for a specific bug where a nested git files generator was being treated as a directory generator. This
|
||||
// happened because, when the matrix generator was being processed, the nested git files generator was being
|
||||
// interpolated by the deeplyReplace function. That function cannot differentiate between a nil slice and an empty
|
||||
// slice. So it was replacing the `Directories` field with an empty slice, which the ApplicationSet controller
|
||||
// interpreted as meaning this was a directory generator, not a files generator.
|
||||
|
||||
// Now instead of checking for nil, we check whether the field is a non-empty slice. This test prevents a regression
|
||||
// of that bug.
|
||||
|
||||
listGeneratorMock := &generatorMock{}
|
||||
listGeneratorMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), mock.AnythingOfType("*v1alpha1.ApplicationSet")).Return([]map[string]interface{}{
|
||||
{"some": "value"},
|
||||
}, nil)
|
||||
listGeneratorMock.On("GetTemplate", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator")).Return(&argoprojiov1alpha1.ApplicationSetTemplate{})
|
||||
|
||||
gitGeneratorSpec := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "https://git.example.com",
|
||||
Files: []argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
{Path: "some/path.json"},
|
||||
},
|
||||
}
|
||||
|
||||
repoServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
repoServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
|
||||
"some/path.json": []byte("test: content"),
|
||||
}, nil)
|
||||
gitGenerator := NewGitGenerator(repoServiceMock)
|
||||
|
||||
matrixGenerator := NewMatrixGenerator(map[string]Generator{
|
||||
"List": listGeneratorMock,
|
||||
"Git": gitGenerator,
|
||||
})
|
||||
|
||||
matrixGeneratorSpec := &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{
|
||||
Raw: []byte(`{"some": "value"}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Git: gitGeneratorSpec,
|
||||
},
|
||||
},
|
||||
}
|
||||
params, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: matrixGeneratorSpec,
|
||||
}, &argoprojiov1alpha1.ApplicationSet{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []map[string]interface{}{{
|
||||
"path": "some",
|
||||
"path.basename": "some",
|
||||
"path.basenameNormalized": "some",
|
||||
"path.filename": "path.json",
|
||||
"path.filenameNormalized": "path.json",
|
||||
"path[0]": "some",
|
||||
"some": "value",
|
||||
"test": "content",
|
||||
}}, params)
|
||||
}
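The nil-versus-empty-slice pitfall described in the comments of the test above is easy to reproduce in isolation. A standalone sketch, using illustrative type names rather than the real deeplyReplace code, of how a reflect-based copy turns a nil slice into an empty one and why a length check treats both cases the same:

```go
// Standalone sketch: a generic reflect-based copy cannot preserve "nil slice"
// versus "empty slice", so a nil check misclassifies the copy while a length
// check does not. The gitGenerator type here is illustrative, not the CRD type.
package main

import (
	"fmt"
	"reflect"
)

type gitGenerator struct {
	Files       []string
	Directories []string
}

// deepCopyViaReflect allocates a fresh slice for every slice field, even when
// the original field was nil, much like a generic template-replacement walk.
func deepCopyViaReflect(src gitGenerator) gitGenerator {
	out := gitGenerator{}
	srcV := reflect.ValueOf(src)
	outV := reflect.ValueOf(&out).Elem()
	for i := 0; i < srcV.NumField(); i++ {
		f := srcV.Field(i)
		if f.Kind() == reflect.Slice {
			// MakeSlice always returns a non-nil slice, so a nil field comes
			// out the other side as an empty-but-non-nil slice.
			copied := reflect.MakeSlice(f.Type(), f.Len(), f.Len())
			reflect.Copy(copied, f)
			outV.Field(i).Set(copied)
		}
	}
	return out
}

func main() {
	orig := gitGenerator{Files: []string{"some/path.json"}} // Directories is nil
	copied := deepCopyViaReflect(orig)

	fmt.Println(orig.Directories == nil)   // true
	fmt.Println(copied.Directories == nil) // false: nil became an empty slice

	// A nil check now treats the copy as a "directory" generator, while a
	// length check classifies the original and the copy identically.
	fmt.Println(len(orig.Directories) > 0, len(copied.Directories) > 0) // false false
}
```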
|
||||
|
||||
@@ -137,27 +137,13 @@ func getParamSetsByMergeKey(mergeKeys []string, paramSets []map[string]interface
|
||||
|
||||
// getParams get the parameters generated by this generator.
|
||||
func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
|
||||
|
||||
var matrix *argoprojiov1alpha1.MatrixGenerator
|
||||
if appSetBaseGenerator.Matrix != nil {
|
||||
nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(appSetBaseGenerator.Matrix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if nestedMatrix != nil {
|
||||
matrix = nestedMatrix.ToMatrixGenerator()
|
||||
}
|
||||
matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mergeGenerator *argoprojiov1alpha1.MergeGenerator
|
||||
if appSetBaseGenerator.Merge != nil {
|
||||
nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(appSetBaseGenerator.Merge)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if nestedMerge != nil {
|
||||
mergeGenerator = nestedMerge.ToMergeGenerator()
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
@@ -168,8 +154,8 @@ func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Applic
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrix,
|
||||
Merge: mergeGenerator,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
Selector: appSetBaseGenerator.Selector,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
@@ -197,10 +183,15 @@ func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.App
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Merge.Generators {
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
@@ -221,6 +212,17 @@ func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.App

}

func getMergeGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MergeGenerator, error) {
if r.Merge == nil {
return nil, nil
}
merge, err := argoprojiov1alpha1.ToNestedMergeGenerator(r.Merge)
if err != nil {
return nil, err
}
return merge.ToMergeGenerator(), nil
}

// GetTemplate gets the Template field for the MergeGenerator.
func (m *MergeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
return &appSetGenerator.Merge.Template

@@ -158,7 +158,7 @@ func TestListPullRequestBasicAuth(t *testing.T) {

func TestListResponseError(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
w.WriteHeader(http.StatusInternalServerError)
}))
defer ts.Close()
svc, _ := NewBitbucketServiceNoAuth(context.Background(), ts.URL, "PROJECT", "REPO")

@@ -29,7 +29,7 @@ func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error
urlStr += fmt.Sprintf("/repositories/%s/%s/src/%s/%s?format=meta", c.owner, repo.Repository, repo.SHA, path)
body := strings.NewReader("")

req, err := http.NewRequest("GET", urlStr, body)
req, err := http.NewRequest(http.MethodGet, urlStr, body)
if err != nil {
return false, err
}

@@ -347,7 +347,7 @@ func TestGetBranchesMissingDefault(t *testing.T) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
switch r.RequestURI {
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default":
|
||||
http.Error(w, "Not found", 404)
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
defaultHandler(t)(w, r)
|
||||
}))
|
||||
@@ -370,7 +370,7 @@ func TestGetBranchesErrorDefaultBranch(t *testing.T) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
switch r.RequestURI {
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default":
|
||||
http.Error(w, "Internal server error", 500)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
}
|
||||
defaultHandler(t)(w, r)
|
||||
}))
|
||||
@@ -442,7 +442,7 @@ func TestListReposMissingDefaultBranch(t *testing.T) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
switch r.RequestURI {
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default":
|
||||
http.Error(w, "Not found", 404)
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
defaultHandler(t)(w, r)
|
||||
}))
|
||||
@@ -459,7 +459,7 @@ func TestListReposErrorDefaultBranch(t *testing.T) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
switch r.RequestURI {
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default":
|
||||
http.Error(w, "Internal server error", 500)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
}
|
||||
defaultHandler(t)(w, r)
|
||||
}))
|
||||
@@ -516,17 +516,17 @@ func TestBitbucketServerHasPath(t *testing.T) {
|
||||
_, err = io.WriteString(w, `{"type":"FILE"}`)
|
||||
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/anotherpkg/missing.txt?at=main&limit=100&type=true":
|
||||
http.Error(w, "The path \"anotherpkg/missing.txt\" does not exist at revision \"main\"", 404)
|
||||
http.Error(w, "The path \"anotherpkg/missing.txt\" does not exist at revision \"main\"", http.StatusNotFound)
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/notathing?at=main&limit=100&type=true":
|
||||
http.Error(w, "The path \"notathing\" does not exist at revision \"main\"", 404)
|
||||
http.Error(w, "The path \"notathing\" does not exist at revision \"main\"", http.StatusNotFound)
|
||||
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/return-redirect?at=main&limit=100&type=true":
|
||||
http.Redirect(w, r, "http://"+r.Host+"/rest/api/1.0/projects/PROJECT/repos/REPO/browse/redirected?at=main&limit=100&type=true", 301)
|
||||
http.Redirect(w, r, "http://"+r.Host+"/rest/api/1.0/projects/PROJECT/repos/REPO/browse/redirected?at=main&limit=100&type=true", http.StatusMovedPermanently)
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/redirected?at=main&limit=100&type=true":
|
||||
_, err = io.WriteString(w, `{"type":"DIRECTORY"}`)
|
||||
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/browse/unauthorized-response?at=main&limit=100&type=true":
|
||||
http.Error(w, "Authentication failed", 401)
|
||||
http.Error(w, "Authentication failed", http.StatusUnauthorized)
|
||||
|
||||
default:
|
||||
t.Fail()
|
||||
|
||||
@@ -47,7 +47,7 @@ func NewGiteaProvider(ctx context.Context, owner, token, url string, allBranches
|
||||
func (g *GiteaProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) {
|
||||
if !g.allBranches {
|
||||
branch, status, err := g.client.GetRepoBranch(g.owner, repo.Repository, repo.Branch)
|
||||
if status.StatusCode == 404 {
|
||||
if status.StatusCode == http.StatusNotFound {
|
||||
return nil, fmt.Errorf("got 404 while getting default branch %q for repo %q - check your repo config: %w", repo.Branch, repo.Repository, err)
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -247,7 +247,7 @@ func giteaMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
_, err := io.WriteString(w, testdata.ReposGiteaGoSdkContentsGiteaResponse)
|
||||
require.NoError(t, err)
|
||||
case "/api/v1/repos/gitea/go-sdk/contents/notathing?ref=master":
|
||||
w.WriteHeader(404)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, err := io.WriteString(w, `{"errors":["object does not exist [id: , rel_path: notathing]"],"message":"GetContentsOrList","url":"https://gitea.com/api/swagger"}`)
|
||||
require.NoError(t, err)
|
||||
default:
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/google/go-github/v35/github"
|
||||
@@ -123,7 +124,7 @@ func (g *GithubProvider) listBranches(ctx context.Context, repo *Repository) ([]
|
||||
if err != nil {
|
||||
var githubErrorResponse *github.ErrorResponse
|
||||
if errors.As(err, &githubErrorResponse) {
|
||||
if githubErrorResponse.Response.StatusCode == 404 {
|
||||
if githubErrorResponse.Response.StatusCode == http.StatusNotFound {
|
||||
// Default branch doesn't exist, so the repo is empty.
|
||||
return []github.Branch{}, nil
|
||||
}
|
||||
|
||||
@@ -196,7 +196,7 @@ func githubMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
t.Fail()
|
||||
}
|
||||
default:
|
||||
w.WriteHeader(404)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"net/http"
|
||||
pathpkg "path"
|
||||
|
||||
gitlab "github.com/xanzy/go-gitlab"
|
||||
@@ -144,7 +145,11 @@ func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gi
|
||||
branches := []gitlab.Branch{}
|
||||
// If we don't specifically want to query for all branches, just use the default branch and call it a day.
|
||||
if !g.allBranches {
|
||||
gitlabBranch, _, err := g.client.Branches.GetBranch(repo.RepositoryId, repo.Branch, nil)
|
||||
gitlabBranch, resp, err := g.client.Branches.GetBranch(repo.RepositoryId, repo.Branch, nil)
|
||||
// 404s are not an error here, just a normal false.
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
return []gitlab.Branch{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -157,6 +162,10 @@ func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gi
|
||||
}
|
||||
for {
|
||||
gitlabBranches, resp, err := g.client.Branches.ListBranches(repo.RepositoryId, opt)
|
||||
// 404s are not an error here, just a normal false.
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
return []gitlab.Branch{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -274,6 +274,8 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/projects/27084533/repository/branches/foo":
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
default:
|
||||
_, err := io.WriteString(w, `[]`)
|
||||
if err != nil {
|
||||
@@ -391,3 +393,29 @@ func TestGitlabHasPath(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitlabGetBranches(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gitlabMockHandler(t)(w, r)
|
||||
}))
|
||||
host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true)
|
||||
|
||||
repo := &Repository{
|
||||
RepositoryId: 27084533,
|
||||
Branch: "master",
|
||||
}
|
||||
t.Run("branch exists", func(t *testing.T) {
|
||||
repos, err := host.GetBranches(context.Background(), repo)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, repos[0].Branch, "master")
|
||||
})
|
||||
|
||||
repo2 := &Repository{
|
||||
RepositoryId: 27084533,
|
||||
Branch: "foo",
|
||||
}
|
||||
t.Run("unknown branch", func(t *testing.T) {
|
||||
_, err := host.GetBranches(context.Background(), repo2)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
34
applicationset/utils/test/testutils.go
Normal file
@@ -0,0 +1,34 @@
package test

import (
"context"

"github.com/stretchr/testify/mock"
)

type ArgoCDServiceMock struct {
Mock *mock.Mock
}

func (a ArgoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
args := a.Mock.Called(ctx, repoURL, revision)

return args.Get(0).([]string), args.Error(1)
}

func (a ArgoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
args := a.Mock.Called(ctx, repoURL, revision, pattern)

return args.Get(0).(map[string][]byte), args.Error(1)
}

func (a ArgoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
args := a.Mock.Called(ctx, repoURL, revision, path)

return args.Get(0).([]byte), args.Error(1)
}

func (a ArgoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
args := a.Mock.Called(ctx, repoURL, revision)
return args.Get(0).([]string), args.Error(1)
}
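The new ArgoCDServiceMock follows the usual testify layout: hold a *mock.Mock, forward every call through Mock.Called, and hand back whatever the test registered with On(). A self-contained sketch of that layout with an illustrative interface, mirroring how the matrix generator test earlier in this diff stubs GetFiles:

```go
// Standalone sketch of the testify pattern the new mock relies on.
// The FileLister interface is illustrative, not an Argo CD type.
package main

import (
	"context"
	"fmt"

	"github.com/stretchr/testify/mock"
)

type FileLister interface {
	GetFiles(ctx context.Context, repoURL, revision, pattern string) (map[string][]byte, error)
}

type fileListerMock struct {
	Mock *mock.Mock
}

func (m fileListerMock) GetFiles(ctx context.Context, repoURL, revision, pattern string) (map[string][]byte, error) {
	// Record the call; testify resolves the method name ("GetFiles") itself.
	args := m.Mock.Called(ctx, repoURL, revision, pattern)
	return args.Get(0).(map[string][]byte), args.Error(1)
}

func main() {
	lister := fileListerMock{Mock: &mock.Mock{}}
	lister.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return(map[string][]byte{"some/path.json": []byte("test: content")}, nil)

	var _ FileLister = lister // the mock satisfies the interface it stands in for

	files, err := lister.GetFiles(context.Background(), "https://git.example.com", "HEAD", "**/*.json")
	fmt.Println(len(files), err) // 1 <nil>
}
```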
@@ -174,7 +174,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri

func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *argoappsv1.ApplicationSetSyncPolicy, params map[string]interface{}, useGoTemplate bool) (*argoappsv1.Application, error) {
if tmpl == nil {
return nil, fmt.Errorf("application template is empty ")
return nil, fmt.Errorf("application template is empty")
}

if len(params) == 0 {
@@ -204,6 +204,27 @@ func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *
return replacedTmpl, nil
}

func (r *Render) RenderGeneratorParams(gen *argoappsv1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool) (*argoappsv1.ApplicationSetGenerator, error) {
if gen == nil {
return nil, fmt.Errorf("generator is empty")
}

if len(params) == 0 {
return gen, nil
}

original := reflect.ValueOf(gen)
copy := reflect.New(original.Type()).Elem()

if err := r.deeplyReplace(copy, original, params, useGoTemplate); err != nil {
return nil, fmt.Errorf("failed to replace parameters in generator: %w", err)
}

replacedGen := copy.Interface().(*argoappsv1.ApplicationSetGenerator)

return replacedGen, nil
}

var isTemplatedRegex = regexp.MustCompile(".*{{.*}}.*")

// Replace executes basic string substitution of a template with replacement values.

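RenderGeneratorParams reuses the same reflect-based recipe as RenderTemplateParams: allocate a fresh value of the generator's type, deep-replace the parameters into it, then assert the copy back to the concrete pointer type. A simplified, self-contained sketch of that recipe, assuming a toy gitGenerator type and plain {{name}} string substitution in place of the real deeplyReplace:

```go
// Standalone sketch of the copy-then-replace pattern, with simplifying
// assumptions: a one-field struct and string-only substitution.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type gitGenerator struct {
	Path string
}

func renderParams(gen *gitGenerator, params map[string]string) *gitGenerator {
	original := reflect.ValueOf(gen)
	copy := reflect.New(original.Type()).Elem()      // a settable *gitGenerator value
	copy.Set(reflect.New(original.Type().Elem()))    // point it at a fresh struct

	src := original.Elem()
	dst := copy.Elem()
	for i := 0; i < src.NumField(); i++ {
		// Substitute {{key}} placeholders while copying each string field.
		v := src.Field(i).String()
		for k, val := range params {
			v = strings.ReplaceAll(v, "{{"+k+"}}", val)
		}
		dst.Field(i).SetString(v)
	}
	return copy.Interface().(*gitGenerator)
}

func main() {
	gen := &gitGenerator{Path: "cluster-config/{{name}}.json"}
	fmt.Println(renderParams(gen, map[string]string{"name": "dev-01"}).Path)
	// cluster-config/dev-01.json
}
```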
@@ -133,7 +133,7 @@ func (h *WebhookHandler) Handler(w http.ResponseWriter, r *http.Request) {
if err != nil {
log.Infof("Webhook processing failed: %s", err)
status := http.StatusBadRequest
if r.Method != "POST" {
if r.Method != http.MethodPost {
status = http.StatusMethodNotAllowed
}
http.Error(w, fmt.Sprintf("Webhook processing failed: %s", html.EscapeString(err.Error())), status)

@@ -177,7 +177,7 @@ func TestWebhookHandler(t *testing.T) {
h, err := NewWebhookHandler(namespace, set, fc, mockGenerators())
assert.Nil(t, err)

req := httptest.NewRequest("POST", "/api/webhook", nil)
req := httptest.NewRequest(http.MethodPost, "/api/webhook", nil)
req.Header.Set(test.headerKey, test.headerValue)
eventJSON, err := os.ReadFile(filepath.Join("testdata", test.payloadFile))
assert.NoError(t, err)

@@ -271,6 +271,16 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -585,6 +595,16 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3430,6 +3450,12 @@
|
||||
"description": "Google Cloud Platform service account key.",
|
||||
"name": "gcpServiceAccountKey",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"description": "Whether to force HTTP basic auth.",
|
||||
"name": "forceHttpBasicAuth",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3588,6 +3614,29 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/settings/plugins": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"SettingsService"
|
||||
],
|
||||
"summary": "Get returns Argo CD plugins",
|
||||
"operationId": "SettingsService_GetPlugins",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/clusterSettingsPluginsResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/stream/applications": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -3641,6 +3690,16 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -4342,6 +4401,17 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterSettingsPluginsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"plugins": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/clusterPlugin"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"gpgkeyGnuPGPublicKeyCreateResponse": {
|
||||
"type": "object",
|
||||
"title": "Response to a public key creation request",
|
||||
@@ -5635,6 +5705,17 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationPreservedFields": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSet": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
@@ -5668,6 +5749,10 @@
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
|
||||
},
|
||||
"step": {
|
||||
"type": "string",
|
||||
"title": "Step tracks which step this Application should be updated in"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5815,6 +5900,9 @@
|
||||
"goTemplate": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"preservedFields": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationPreservedFields"
|
||||
},
|
||||
"strategy": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetStrategy"
|
||||
},
|
||||
@@ -6057,6 +6145,10 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"commonAnnotationsEnvsubst": {
|
||||
"type": "boolean",
|
||||
"title": "CommonAnnotationsEnvsubst specifies whether to apply env variables substitution for annotation values"
|
||||
},
|
||||
"commonLabels": {
|
||||
"type": "object",
|
||||
"title": "CommonLabels is a list of additional labels to add to rendered manifests",
|
||||
@@ -6087,6 +6179,17 @@
|
||||
"type": "string",
|
||||
"title": "NameSuffix is a suffix appended to resources for Kustomize apps"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string",
|
||||
"title": "Namespace sets the namespace that Kustomize adds to all resources"
|
||||
},
|
||||
"replicas": {
|
||||
"type": "array",
|
||||
"title": "Replicas is a list of Kustomize Replicas override specifications",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeReplica"
|
||||
}
|
||||
},
|
||||
"version": {
|
||||
"type": "string",
|
||||
"title": "Version controls which version of Kustomize to use for rendering manifests"
|
||||
@@ -6919,6 +7022,18 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeReplica": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"$ref": "#/definitions/intstrIntOrString"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name of Deployment or StatefulSet"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ListGenerator": {
|
||||
"type": "object",
|
||||
"title": "ListGenerator include items info",
|
||||
@@ -6929,6 +7044,9 @@
|
||||
"$ref": "#/definitions/v1JSON"
|
||||
}
|
||||
},
|
||||
"elementsYaml": {
|
||||
"type": "string"
|
||||
},
|
||||
"template": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetTemplate"
|
||||
}
|
||||
@@ -7304,6 +7422,10 @@
|
||||
"type": "boolean",
|
||||
"title": "EnableOCI specifies whether helm-oci support should be enabled for this repo"
|
||||
},
|
||||
"forceHttpBasicAuth": {
|
||||
"type": "boolean",
|
||||
"title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections"
|
||||
},
|
||||
"gcpServiceAccountKey": {
|
||||
"type": "string",
|
||||
"title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos"
|
||||
@@ -7390,6 +7512,10 @@
|
||||
"type": "boolean",
|
||||
"title": "EnableOCI specifies whether helm-oci support should be enabled for this repo"
|
||||
},
|
||||
"forceHttpBasicAuth": {
|
||||
"type": "boolean",
|
||||
"title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections"
|
||||
},
|
||||
"gcpServiceAccountKey": {
|
||||
"type": "string",
|
||||
"title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos"
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@@ -188,7 +188,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
|
||||
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
|
||||
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
|
||||
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 20, "Number of allowed concurrent kubectl fork/execs. Any value less the 1 means no limit.")
|
||||
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
|
||||
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
|
||||
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
|
||||
command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric")
|
||||
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services"
|
||||
appsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
@@ -46,21 +45,20 @@ func getSubmoduleEnabled() bool {
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
metricsAddr string
|
||||
probeBindAddr string
|
||||
webhookAddr string
|
||||
enableLeaderElection bool
|
||||
namespace string
|
||||
argocdRepoServer string
|
||||
policy string
|
||||
debugLog bool
|
||||
dryRun bool
|
||||
enableProgressiveRollouts bool
|
||||
clientConfig clientcmd.ClientConfig
|
||||
metricsAddr string
|
||||
probeBindAddr string
|
||||
webhookAddr string
|
||||
enableLeaderElection bool
|
||||
namespace string
|
||||
argocdRepoServer string
|
||||
policy string
|
||||
debugLog bool
|
||||
dryRun bool
|
||||
enableProgressiveSyncs bool
|
||||
)
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = appsetv1alpha1.AddToScheme(scheme)
|
||||
_ = appv1alpha1.AddToScheme(scheme)
|
||||
var command = cobra.Command{
|
||||
Use: "controller",
|
||||
@@ -169,17 +167,17 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
go func() { errors.CheckError(askPassServer.Run(askpass.SocketPath)) }()
|
||||
if err = (&controllers.ApplicationSetReconciler{
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
EnableProgressiveRollouts: enableProgressiveRollouts,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
EnableProgressiveSyncs: enableProgressiveSyncs,
|
||||
}).SetupWithManager(mgr, enableProgressiveSyncs); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -207,7 +205,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
|
||||
command.Flags().BoolVar(&enableProgressiveRollouts, "enable-progressive-rollouts", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_ENABLE_PROGRESSIVE_ROLLOUTS", false), "Enable use of the experimental progressive rollouts feature.")
|
||||
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
@@ -44,6 +44,14 @@ func NewCommand() *cobra.Command {
|
||||
config, err := plugin.ReadPluginConfig(configFilePath)
|
||||
errors.CheckError(err)
|
||||
|
||||
if !config.Spec.Discover.IsDefined() {
|
||||
name := config.Metadata.Name
|
||||
if config.Spec.Version != "" {
|
||||
name = name + "-" + config.Spec.Version
|
||||
}
|
||||
log.Infof("No discovery configuration is defined for plugin %s. To use this plugin, specify %q in the Application's spec.source.plugin.name field.", config.Metadata.Name, name)
|
||||
}
|
||||
|
||||
if otlpAddress != "" {
|
||||
var closer func()
|
||||
var err error
|
||||
|
||||
@@ -156,7 +156,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&logFormat, "logformat", "text", "Set the logging format. One of: text|json")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", "argocd-repo-server:8081", "Argo CD repo server address")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address")
|
||||
command.Flags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server")
|
||||
command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
@@ -17,6 +17,8 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -27,9 +29,9 @@ const (
|
||||
var (
|
||||
configMapResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
|
||||
secretResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
|
||||
applicationsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"}
|
||||
appprojectsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "appprojects"}
|
||||
appplicationSetResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applicationsets"}
|
||||
applicationsResource = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.ApplicationPlural}
|
||||
appprojectsResource = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.AppProjectPlural}
|
||||
appplicationSetResource = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.ApplicationSetPlural}
|
||||
)
|
||||
|
||||
// NewAdminCommand returns a new instance of an argocd command
|
||||
@@ -193,11 +195,11 @@ func specsEqual(left, right unstructured.Unstructured) bool {
|
||||
leftData, _, _ := unstructured.NestedMap(left.Object, "data")
|
||||
rightData, _, _ := unstructured.NestedMap(right.Object, "data")
|
||||
return reflect.DeepEqual(leftData, rightData)
|
||||
case "AppProject":
|
||||
case application.AppProjectKind:
|
||||
leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec")
|
||||
rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec")
|
||||
return reflect.DeepEqual(leftSpec, rightSpec)
|
||||
case "Application":
|
||||
case application.ApplicationKind:
|
||||
leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec")
|
||||
rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec")
|
||||
leftStatus, _, _ := unstructured.NestedMap(left.Object, "status")
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
)
|
||||
@@ -176,12 +177,12 @@ func NewImportCommand() *cobra.Command {
|
||||
applications, err := acdClients.applications.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, app := range applications.Items {
|
||||
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "Application", Name: app.GetName()}] = app
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationKind, Name: app.GetName()}] = app
|
||||
}
|
||||
projects, err := acdClients.projects.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, proj := range projects.Items {
|
||||
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "AppProject", Name: proj.GetName()}] = proj
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.AppProjectKind, Name: proj.GetName()}] = proj
|
||||
}
|
||||
applicationSets, err := acdClients.applicationSets.List(ctx, v1.ListOptions{})
|
||||
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
|
||||
@@ -191,7 +192,7 @@ func NewImportCommand() *cobra.Command {
|
||||
}
|
||||
if applicationSets != nil {
|
||||
for _, appSet := range applicationSets.Items {
|
||||
pruneObjects[kube.ResourceKey{Group: "argoproj.io", Kind: "ApplicationSet", Name: appSet.GetName()}] = appSet
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationSetKind, Name: appSet.GetName()}] = appSet
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,11 +210,11 @@ func NewImportCommand() *cobra.Command {
|
||||
dynClient = acdClients.secrets
|
||||
case "ConfigMap":
|
||||
dynClient = acdClients.configMaps
|
||||
case "AppProject":
|
||||
case application.AppProjectKind:
|
||||
dynClient = acdClients.projects
|
||||
case "Application":
|
||||
case application.ApplicationKind:
|
||||
dynClient = acdClients.applications
|
||||
case "ApplicationSet":
|
||||
case application.ApplicationSetKind:
|
||||
dynClient = acdClients.applicationSets
|
||||
}
|
||||
if !exists {
|
||||
@@ -260,9 +261,9 @@ func NewImportCommand() *cobra.Command {
|
||||
switch key.Kind {
|
||||
case "Secret":
|
||||
dynClient = acdClients.secrets
|
||||
case "AppProject":
|
||||
case application.AppProjectKind:
|
||||
dynClient = acdClients.projects
|
||||
case "Application":
|
||||
case application.ApplicationKind:
|
||||
dynClient = acdClients.applications
|
||||
if !dryRun {
|
||||
if finalizers := liveObj.GetFinalizers(); len(finalizers) > 0 {
|
||||
@@ -274,7 +275,7 @@ func NewImportCommand() *cobra.Command {
|
||||
}
|
||||
}
|
||||
}
|
||||
case "ApplicationSet":
|
||||
case application.ApplicationSetKind:
|
||||
dynClient = acdClients.applicationSets
|
||||
default:
|
||||
log.Fatalf("Unexpected kind '%s' in prune list", key.Kind)
|
||||
@@ -314,7 +315,7 @@ func checkAppHasNoNeedToStopOperation(liveObj unstructured.Unstructured, stopOpe
|
||||
return true
|
||||
}
|
||||
switch liveObj.GetKind() {
|
||||
case "Application":
|
||||
case application.ApplicationKind:
|
||||
return liveObj.Object["operation"] == nil
|
||||
}
|
||||
return true
|
||||
@@ -353,9 +354,9 @@ func updateLive(bak, live *unstructured.Unstructured, stopOperation bool) *unstr
|
||||
switch live.GetKind() {
|
||||
case "Secret", "ConfigMap":
|
||||
newLive.Object["data"] = bak.Object["data"]
|
||||
case "AppProject":
|
||||
case application.AppProjectKind:
|
||||
newLive.Object["spec"] = bak.Object["spec"]
|
||||
case "Application":
|
||||
case application.ApplicationKind:
|
||||
newLive.Object["spec"] = bak.Object["spec"]
|
||||
if _, ok := bak.Object["status"]; ok {
|
||||
newLive.Object["status"] = bak.Object["status"]
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -265,9 +265,7 @@ func runClusterNamespacesCommand(ctx context.Context, clientConfig clientcmd.Cli
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if app.Spec.Destination.Server == cluster.Server {
|
||||
nsSet[app.Spec.Destination.Namespace] = true
|
||||
}
|
||||
nsSet[app.Spec.Destination.Namespace] = true
|
||||
}
|
||||
}
|
||||
var namespaces []string
|
||||
@@ -543,6 +541,11 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
|
||||
return
|
||||
}
|
||||
|
||||
if clusterOpts.InCluster && clusterOpts.ClusterEndpoint != "" {
|
||||
log.Fatal("Can only use one of --in-cluster or --cluster-endpoint")
|
||||
return
|
||||
}
|
||||
|
||||
overrides := clientcmd.ConfigOverrides{
|
||||
Context: *clstContext,
|
||||
}
|
||||
@@ -582,9 +585,13 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
|
||||
errors.CheckError(err)
|
||||
|
||||
clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, bearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap)
|
||||
if clusterOpts.InCluster {
|
||||
if clusterOpts.InClusterEndpoint() {
|
||||
clst.Server = argoappv1.KubernetesInternalAPIServerAddr
|
||||
}
|
||||
if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) {
|
||||
// Ignore `kube-public` cluster endpoints, since this command is intended to run without invoking any network connections.
|
||||
log.Warn("kube-public cluster endpoints are not supported. Falling back to the endpoint listed in the kubconfig context.")
|
||||
}
|
||||
if clusterOpts.Shard >= 0 {
|
||||
clst.Shard = &clusterOpts.Shard
|
||||
}
|
||||
|
||||
@@ -9,13 +9,16 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
)
|
||||
|
||||
func NewDashboardCommand() *cobra.Command {
|
||||
var (
|
||||
port int
|
||||
address string
|
||||
port int
|
||||
address string
|
||||
compressionStr string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "dashboard",
|
||||
@@ -23,7 +26,9 @@ func NewDashboardCommand() *cobra.Command {
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
ctx := cmd.Context()
|
||||
|
||||
errors.CheckError(headless.StartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address))
|
||||
compression, err := cache.CompressionTypeFromString(compressionStr)
|
||||
errors.CheckError(err)
|
||||
errors.CheckError(headless.StartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression))
|
||||
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
|
||||
<-ctx.Done()
|
||||
},
|
||||
@@ -31,5 +36,6 @@ func NewDashboardCommand() *cobra.Command {
|
||||
initialize.InitCommand(cmd)
|
||||
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
|
||||
cmd.Flags().StringVar(&address, "address", common.DefaultAddressAPIServer, "Listen on given address")
|
||||
cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionNone)), "Enable this if the application controller is configured with redis compression enabled. (possible values: none, gzip)")
|
||||
return cmd
|
||||
}
|
||||
|
||||
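The dashboard hunk above adds a --redis-compress flag that must match the application controller's Redis compression setting; a usage sketch (the `argocd admin dashboard` command path is assumed from the command name above):

# Run the local dashboard against a controller configured with gzip-compressed Redis entries
argocd admin dashboard --port 8080 --redis-compress gzip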
@@ -15,12 +15,13 @@ import (
|
||||
settings "github.com/argoproj/argo-cd/v2/util/notification/settings"
|
||||
"github.com/argoproj/argo-cd/v2/util/tls"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
"github.com/argoproj/notifications-engine/pkg/cmd"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
applications = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"}
|
||||
applications = schema.GroupVersionResource{Group: application.Group, Version: "v1alpha1", Resource: application.ApplicationPlural}
|
||||
)
|
||||
|
||||
func NewNotificationsCommand() *cobra.Command {
|
||||
@@ -64,7 +65,7 @@ func NewNotificationsCommand() *cobra.Command {
|
||||
log.Fatalf("Failed to initialize Argo CD service: %v", err)
|
||||
}
|
||||
})
|
||||
toolsCommand.PersistentFlags().StringVar(&argocdRepoServer, "argocd-repo-server", "argocd-repo-server:8081", "Argo CD repo server address")
|
||||
toolsCommand.PersistentFlags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address")
|
||||
toolsCommand.PersistentFlags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server")
|
||||
toolsCommand.PersistentFlags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
return toolsCommand
|
||||
|
||||
@@ -28,6 +28,8 @@ import (
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
||||
// load the azure plugin (required to authenticate with AKS clusters).
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
)
|
||||
|
||||
// NewProjectAllowListGenCommand generates a project from clusterRole
|
||||
@@ -151,7 +153,7 @@ func generateProjectAllowList(serverResources []*metav1.APIResourceList, cluster
|
||||
}
|
||||
globalProj := v1alpha1.AppProject{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "AppProject",
|
||||
Kind: application.AppProjectKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: projName},
|
||||
|
||||
@@ -206,7 +206,7 @@ var validatorsByGroup = map[string]settingValidator{
|
||||
}
|
||||
ssoProvider = "Dex"
|
||||
} else if general.OIDCConfigRAW != "" {
|
||||
if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil {
|
||||
if err := settings.ValidateOIDCConfig(general.OIDCConfigRAW); err != nil {
|
||||
return "", fmt.Errorf("invalid oidc.config: %v", err)
|
||||
}
|
||||
ssoProvider = "OIDC"
|
||||
|
||||
@@ -33,7 +33,6 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
argocommon "github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
|
||||
@@ -150,9 +149,6 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
if appNamespace != "" {
|
||||
app.Namespace = appNamespace
|
||||
}
|
||||
@@ -169,7 +165,9 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
|
||||
// Get the app before creating it to determine whether it is being updated or unchanged
|
||||
existing, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &app.Name})
|
||||
if grpc.UnwrapGRPCStatus(err).Code() != codes.NotFound {
|
||||
unwrappedError := grpc.UnwrapGRPCStatus(err).Code()
|
||||
// As part of the fix for CVE-2022-41354, the API will return Permission Denied when an app does not exist.
|
||||
if unwrappedError != codes.NotFound && unwrappedError != codes.PermissionDenied {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
@@ -294,10 +292,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
pConn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(pConn)
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: app.Spec.Project})
|
||||
@@ -625,10 +619,6 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
visited := cmdutil.SetAppSpecOptions(c.Flags(), &app.Spec, &appOpts)
|
||||
if visited == 0 {
|
||||
log.Error("Please set at least one option to update")
|
||||
@@ -655,7 +645,9 @@ type unsetOpts struct {
|
||||
namePrefix bool
|
||||
nameSuffix bool
|
||||
kustomizeVersion bool
|
||||
kustomizeNamespace bool
|
||||
kustomizeImages []string
|
||||
kustomizeReplicas []string
|
||||
parameters []string
|
||||
valuesFiles []string
|
||||
valuesLiteral bool
|
||||
@@ -664,6 +656,17 @@ type unsetOpts struct {
|
||||
passCredentials bool
|
||||
}
|
||||
|
||||
// KustomizeIsZero returns true when the Application's kustomize options are considered empty
|
||||
func (o *unsetOpts) KustomizeIsZero() bool {
|
||||
return o == nil ||
|
||||
!o.namePrefix &&
|
||||
!o.nameSuffix &&
|
||||
!o.kustomizeVersion &&
|
||||
!o.kustomizeNamespace &&
|
||||
len(o.kustomizeImages) == 0 &&
|
||||
len(o.kustomizeReplicas) == 0
|
||||
}
|
||||
|
||||
// NewApplicationUnsetCommand returns a new instance of an `argocd app unset` command
|
||||
func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
appOpts := cmdutil.AppOptions{}
|
||||
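KustomizeIsZero replaces the long inline condition in unset (see the change further below); a small sketch of its behaviour under the definition above, with hypothetical values:

// Sketch only: illustrates the helper, values are hypothetical.
empty := (&unsetOpts{}).KustomizeIsZero()                             // true: no kustomize option was requested
requested := (&unsetOpts{kustomizeNamespace: true}).KustomizeIsZero() // false: a kustomize unset was requested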
@@ -693,10 +696,6 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
source := app.Spec.GetSource()
|
||||
updated, nothingToUnset := unset(&source, opts)
|
||||
if nothingToUnset {
|
||||
@@ -724,7 +723,9 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
command.Flags().BoolVar(&opts.nameSuffix, "namesuffix", false, "Kustomize namesuffix")
|
||||
command.Flags().BoolVar(&opts.namePrefix, "nameprefix", false, "Kustomize nameprefix")
|
||||
command.Flags().BoolVar(&opts.kustomizeVersion, "kustomize-version", false, "Kustomize version")
|
||||
command.Flags().BoolVar(&opts.kustomizeNamespace, "kustomize-namespace", false, "Kustomize namespace")
|
||||
command.Flags().StringArrayVar(&opts.kustomizeImages, "kustomize-image", []string{}, "Kustomize images name (e.g. --kustomize-image node --kustomize-image mysql)")
|
||||
command.Flags().StringArrayVar(&opts.kustomizeReplicas, "kustomize-replica", []string{}, "Kustomize replicas name (e.g. --kustomize-replica my-deployment --kustomize-replica my-statefulset)")
|
||||
command.Flags().StringArrayVar(&opts.pluginEnvs, "plugin-env", []string{}, "Unset plugin env variables (e.g --plugin-env name)")
|
||||
command.Flags().BoolVar(&opts.passCredentials, "pass-credentials", false, "Unset passCredentials")
|
||||
return command
|
||||
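The new --kustomize-namespace and --kustomize-replica flags registered above can be combined; a usage sketch in the style of the other CLI examples in this diff:

# Remove a kustomize replica override and the kustomize namespace from an app's source
argocd app unset my-app --kustomize-replica my-deployment --kustomize-namespace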
@@ -732,7 +733,7 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
|
||||
func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, nothingToUnset bool) {
|
||||
if source.Kustomize != nil {
|
||||
if !opts.namePrefix && !opts.nameSuffix && !opts.kustomizeVersion && len(opts.kustomizeImages) == 0 {
|
||||
if opts.KustomizeIsZero() {
|
||||
return false, true
|
||||
}
|
||||
|
||||
@@ -751,6 +752,11 @@ func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, n
|
||||
source.Kustomize.Version = ""
|
||||
}
|
||||
|
||||
if opts.kustomizeNamespace && source.Kustomize.Namespace != "" {
|
||||
updated = true
|
||||
source.Kustomize.Namespace = ""
|
||||
}
|
||||
|
||||
for _, kustomizeImage := range opts.kustomizeImages {
|
||||
for i, item := range source.Kustomize.Images {
|
||||
if argoappv1.KustomizeImage(kustomizeImage).Match(item) {
|
||||
@@ -764,6 +770,17 @@ func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, kustomizeReplica := range opts.kustomizeReplicas {
|
||||
kustomizeReplicas := source.Kustomize.Replicas
|
||||
for i, item := range kustomizeReplicas {
|
||||
if kustomizeReplica == item.Name {
|
||||
source.Kustomize.Replicas = append(kustomizeReplicas[0:i], kustomizeReplicas[i+1:]...)
|
||||
updated = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if source.Helm != nil {
|
||||
if len(opts.parameters) == 0 && len(opts.valuesFiles) == 0 && !opts.valuesLiteral && !opts.ignoreMissingValueFiles && !opts.passCredentials {
|
||||
@@ -933,10 +950,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
conn, settingsIf := clientset.NewSettingsClientOrDie()
|
||||
@@ -967,7 +980,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
|
||||
diffOption.serversideRes = res
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.6.")
|
||||
fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.")
|
||||
conn, clusterIf := clientset.NewClusterClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
|
||||
@@ -1301,17 +1314,6 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
if cluster != "" {
|
||||
appList = argo.FilterByCluster(appList, cluster)
|
||||
}
|
||||
var appsWithDeprecatedPlugins []string
|
||||
for _, app := range appList {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
appsWithDeprecatedPlugins = append(appsWithDeprecatedPlugins, app.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(appsWithDeprecatedPlugins) > 0 {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
log.Warnf("The following Applications use deprecated plugins: %s", strings.Join(appsWithDeprecatedPlugins, ", "))
|
||||
}
|
||||
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
@@ -1376,6 +1378,7 @@ const (
|
||||
resourceFieldCount = 3
|
||||
resourceFieldNamespaceDelimiter = "/"
|
||||
resourceFieldNameWithNamespaceCount = 2
|
||||
resourceExcludeIndicator = "!"
|
||||
)
|
||||
|
||||
// resource is GROUP:KIND:NAMESPACE/NAME or GROUP:KIND:NAME
|
||||
@@ -1400,6 +1403,12 @@ func parseSelectedResources(resources []string) ([]*argoappv1.SyncOperationResou
|
||||
}
|
||||
|
||||
for _, resource := range resources {
|
||||
isExcluded := false
|
||||
// check if the resource flag starts with a '!'
|
||||
if strings.HasPrefix(resource, resourceExcludeIndicator) {
|
||||
resource = strings.TrimPrefix(resource, resourceExcludeIndicator)
|
||||
isExcluded = true
|
||||
}
|
||||
fields := strings.Split(resource, resourceFieldDelimiter)
|
||||
if len(fields) != resourceFieldCount {
|
||||
return nil, fmt.Errorf("Resource should have GROUP%sKIND%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, resource)
|
||||
@@ -1413,6 +1422,7 @@ func parseSelectedResources(resources []string) ([]*argoappv1.SyncOperationResou
|
||||
Kind: fields[1],
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Exclude: isExcluded,
|
||||
})
|
||||
}
|
||||
return selectedResources, nil
|
||||
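A short sketch of how parseSelectedResources handles the new '!' exclude indicator, consistent with the test expectations later in this diff:

// "!apps:Deployment:my-service" parses into an excluded resource filter (sketch).
selected, err := parseSelectedResources([]string{"!apps:Deployment:my-service"})
if err == nil {
    // selected[0]: Group "apps", Kind "Deployment", Name "my-service", Exclude true
    _ = selected[0].Exclude
}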
@@ -1447,6 +1457,16 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
# Wait for multiple apps
|
||||
argocd app wait my-app other-app
|
||||
|
||||
# Wait for apps by resource
|
||||
# Resource should be formatted as GROUP:KIND:NAME. If no GROUP is specified then :KIND:NAME.
|
||||
argocd app wait my-app --resource :Service:my-service
|
||||
argocd app wait my-app --resource argoproj.io:Rollout:my-rollout
|
||||
argocd app wait my-app --resource '!apps:Deployment:my-service'
|
||||
argocd app wait my-app --resource apps:Deployment:my-service --resource :Service:my-service
|
||||
argocd app wait my-app --resource '!*:Service:*'
|
||||
# Specify namespace if the application has resources with the same name in different namespaces
|
||||
argocd app wait my-app --resource argoproj.io:Rollout:my-namespace/my-rollout
|
||||
|
||||
# Wait for apps by label, in this example we are waiting for apps that are children of another app (aka app-of-apps)
|
||||
argocd app wait -l app.kubernetes.io/instance=my-app
|
||||
argocd app wait -l app.kubernetes.io/instance!=my-app
|
||||
@@ -1485,7 +1505,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&watch.suspended, "suspended", false, "Wait for suspended")
|
||||
command.Flags().BoolVar(&watch.degraded, "degraded", false, "Wait for degraded")
|
||||
command.Flags().StringVarP(&selector, "selector", "l", "", "Wait for apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter))
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
|
||||
command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
return command
|
||||
@@ -1545,6 +1565,9 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
# Resource should be formatted as GROUP:KIND:NAME. If no GROUP is specified then :KIND:NAME
|
||||
argocd app sync my-app --resource :Service:my-service
|
||||
argocd app sync my-app --resource argoproj.io:Rollout:my-rollout
|
||||
argocd app sync my-app --resource '!apps:Deployment:my-service'
|
||||
argocd app sync my-app --resource apps:Deployment:my-service --resource :Service:my-service
|
||||
argocd app sync my-app --resource '!*:Service:*'
|
||||
# Specify namespace if the application has resources with the same name in different namespaces
|
||||
argocd app sync my-app --resource argoproj.io:Rollout:my-namespace/my-rollout`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
@@ -1640,11 +1663,15 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
return
|
||||
}
|
||||
|
||||
if local != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
// filters out only those resources that need to be synced
|
||||
filteredResources := filterAppResources(app, selectedResources)
|
||||
|
||||
// if resources are provided and no app resources match, then return error
|
||||
if len(resources) > 0 && len(filteredResources) == 0 {
|
||||
log.Fatalf("No matching app resources found for resource filter: %v", strings.Join(resources, ", "))
|
||||
}
|
||||
|
||||
if local != "" {
|
||||
if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil && !dryRun {
|
||||
log.Fatal("Cannot use local sync when Automatic Sync Policy is enabled except with --dry-run")
|
||||
}
|
||||
@@ -1690,7 +1717,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
AppNamespace: &appNs,
|
||||
DryRun: &dryRun,
|
||||
Revision: &revision,
|
||||
Resources: selectedResources,
|
||||
Resources: filteredResources,
|
||||
Prune: &prune,
|
||||
Manifests: localObjsStrings,
|
||||
Infos: getInfos(infos),
|
||||
@@ -1718,10 +1745,6 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
if diffChanges {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{
|
||||
ApplicationName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
@@ -1770,7 +1793,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", false, "Preview apply without affecting cluster")
|
||||
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
|
||||
command.Flags().StringVar(&revision, "revision", "", "Sync to a specific revision. Preserves parameter overrides")
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter))
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
|
||||
command.Flags().StringVarP(&selector, "selector", "l", "", "Sync apps that match this label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
|
||||
command.Flags().StringArrayVar(&labels, "label", []string{}, "Sync only specific resources with a label. This option may be specified repeatedly.")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
@@ -1895,15 +1918,9 @@ func getResourceStates(app *argoappv1.Application, selectedResources []*argoappv
|
||||
}
|
||||
// filter out not selected resources
|
||||
if len(selectedResources) > 0 {
|
||||
r := []argoappv1.SyncOperationResource{}
|
||||
for _, res := range selectedResources {
|
||||
if res != nil {
|
||||
r = append(r, *res)
|
||||
}
|
||||
}
|
||||
for i := len(states) - 1; i >= 0; i-- {
|
||||
res := states[i]
|
||||
if !argo.ContainsSyncResource(res.Name, res.Namespace, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, r) {
|
||||
if !argo.IncludeResource(res.Name, res.Namespace, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, selectedResources) {
|
||||
states = append(states[:i], states[i+1:]...)
|
||||
}
|
||||
}
|
||||
@@ -1911,6 +1928,26 @@ func getResourceStates(app *argoappv1.Application, selectedResources []*argoappv
|
||||
return states
|
||||
}
|
||||
|
||||
// filterAppResources selects the app resources that match at least one of the resource filters.
|
||||
func filterAppResources(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) []*argoappv1.SyncOperationResource {
|
||||
var filteredResources []*argoappv1.SyncOperationResource
|
||||
if app != nil && len(selectedResources) > 0 {
|
||||
for i := range app.Status.Resources {
|
||||
appResource := app.Status.Resources[i]
|
||||
if (argo.IncludeResource(appResource.Name, appResource.Namespace,
|
||||
schema.GroupVersionKind{Group: appResource.Group, Kind: appResource.Kind}, selectedResources)) {
|
||||
filteredResources = append(filteredResources, &argoappv1.SyncOperationResource{
|
||||
Group: appResource.Group,
|
||||
Kind: appResource.Kind,
|
||||
Name: appResource.Name,
|
||||
Namespace: appResource.Namespace,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return filteredResources
|
||||
}
|
||||
|
||||
func groupResourceStates(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) map[string]*resourceState {
|
||||
resStates := make(map[string]*resourceState)
|
||||
for _, result := range getResourceStates(app, selectedResources) {
|
||||
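filterAppResources is what now populates the Resources field of the sync request (see the Resources: filteredResources change above); a sketch of the intended effect, assuming an app whose status lists one Deployment and one Service:

// Keep everything except Services, mirroring the '!*:Service:*' CLI filter (sketch).
filtered := filterAppResources(app, []*argoappv1.SyncOperationResource{
    {Group: "*", Kind: "Service", Name: "*", Exclude: true},
})
// filtered now contains only the Deployment entry from app.Status.Resources.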
@@ -2165,10 +2202,6 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
if output == "id" {
|
||||
printApplicationHistoryIds(app.Status.History)
|
||||
} else {
|
||||
@@ -2229,10 +2262,6 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
depInfo, err := findRevisionHistory(app, int64(depID))
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -2316,10 +2345,6 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
settingsConn, settingsIf := clientset.NewSettingsClientOrDie()
|
||||
defer argoio.Close(settingsConn)
|
||||
argoSettings, err := settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{})
|
||||
@@ -2419,10 +2444,6 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
appData, err := json.Marshal(app.Spec)
|
||||
errors.CheckError(err)
|
||||
appData, err = yaml.JSONToYAML(appData)
|
||||
@@ -2464,12 +2485,11 @@ func NewApplicationPatchCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
command := cobra.Command{
|
||||
Use: "patch APPNAME",
|
||||
Short: "Patch application",
|
||||
Long: `Examples:
|
||||
# Update an application's source path using json patch
|
||||
argocd app patch myapplication --patch='[{"op": "replace", "path": "/spec/source/path", "value": "newPath"}]' --type json
|
||||
Example: ` # Update an application's source path using json patch
|
||||
argocd app patch myapplication --patch='[{"op": "replace", "path": "/spec/source/path", "value": "newPath"}]' --type json
|
||||
|
||||
# Update an application's repository target revision using merge patch
|
||||
argocd app patch myapplication --patch '{"spec": { "source": { "targetRevision": "master" } }}' --type merge`,
|
||||
# Update an application's repository target revision using merge patch
|
||||
argocd app patch myapplication --patch '{"spec": { "source": { "targetRevision": "master" } }}' --type merge`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
@@ -202,7 +203,7 @@ func getActionableResourcesForApplication(appIf applicationpkg.ApplicationServic
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
app.Kind = "Application"
|
||||
app.Kind = application.ApplicationKind
|
||||
app.APIVersion = "argoproj.io/v1alpha1"
|
||||
appManifest, err := json.Marshal(app)
|
||||
if err != nil {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func Test_getInfos(t *testing.T) {
|
||||
@@ -750,6 +752,16 @@ func Test_unset(t *testing.T) {
|
||||
"old1=new:tag",
|
||||
"old2=new:tag",
|
||||
},
|
||||
Replicas: []v1alpha1.KustomizeReplica{
|
||||
{
|
||||
Name: "my-deployment",
|
||||
Count: intstr.FromInt(2),
|
||||
},
|
||||
{
|
||||
Name: "my-statefulset",
|
||||
Count: intstr.FromInt(4),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -826,6 +838,15 @@ func Test_unset(t *testing.T) {
|
||||
assert.False(t, updated)
|
||||
assert.False(t, nothingToUnset)
|
||||
|
||||
assert.Equal(t, 2, len(kustomizeSource.Kustomize.Replicas))
|
||||
updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeReplicas: []string{"my-deployment"}})
|
||||
assert.Equal(t, 1, len(kustomizeSource.Kustomize.Replicas))
|
||||
assert.True(t, updated)
|
||||
assert.False(t, nothingToUnset)
|
||||
updated, nothingToUnset = unset(kustomizeSource, unsetOpts{kustomizeReplicas: []string{"my-deployment"}})
|
||||
assert.False(t, updated)
|
||||
assert.False(t, nothingToUnset)
|
||||
|
||||
assert.Equal(t, 2, len(helmSource.Helm.Parameters))
|
||||
updated, nothingToUnset = unset(helmSource, unsetOpts{parameters: []string{"name-1"}})
|
||||
assert.Equal(t, 1, len(helmSource.Helm.Parameters))
|
||||
@@ -904,22 +925,252 @@ func Test_unset_nothingToUnset(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterAppResources(t *testing.T) {
|
||||
// App resources
|
||||
var (
|
||||
appReplicaSet1 = v1alpha1.ResourceStatus{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Namespace: "default",
|
||||
Name: "replicaSet-name1",
|
||||
}
|
||||
appReplicaSet2 = v1alpha1.ResourceStatus{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Namespace: "default",
|
||||
Name: "replicaSet-name2",
|
||||
}
|
||||
appJob = v1alpha1.ResourceStatus{
|
||||
Group: "batch",
|
||||
Kind: "Job",
|
||||
Namespace: "default",
|
||||
Name: "job-name",
|
||||
}
|
||||
appService1 = v1alpha1.ResourceStatus{
|
||||
Group: "",
|
||||
Kind: "Service",
|
||||
Namespace: "default",
|
||||
Name: "service-name1",
|
||||
}
|
||||
appService2 = v1alpha1.ResourceStatus{
|
||||
Group: "",
|
||||
Kind: "Service",
|
||||
Namespace: "default",
|
||||
Name: "service-name2",
|
||||
}
|
||||
appDeployment = v1alpha1.ResourceStatus{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
Namespace: "default",
|
||||
Name: "deployment-name",
|
||||
}
|
||||
)
|
||||
app := v1alpha1.Application{
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
Resources: []v1alpha1.ResourceStatus{
|
||||
appReplicaSet1, appReplicaSet2, appJob, appService1, appService2, appDeployment},
|
||||
},
|
||||
}
|
||||
// Resource filters
|
||||
var (
|
||||
blankValues = argoappv1.SyncOperationResource{
|
||||
Group: "",
|
||||
Kind: "",
|
||||
Name: "",
|
||||
Namespace: "",
|
||||
Exclude: false}
|
||||
// *:*:*
|
||||
includeAllResources = argoappv1.SyncOperationResource{
|
||||
Group: "*",
|
||||
Kind: "*",
|
||||
Name: "*",
|
||||
Namespace: "",
|
||||
Exclude: false}
|
||||
// !*:*:*
|
||||
excludeAllResources = argoappv1.SyncOperationResource{
|
||||
Group: "*",
|
||||
Kind: "*",
|
||||
Name: "*",
|
||||
Namespace: "",
|
||||
Exclude: true}
|
||||
// *:Service:*
|
||||
includeAllServiceResources = argoappv1.SyncOperationResource{
|
||||
Group: "*",
|
||||
Kind: "Service",
|
||||
Name: "*",
|
||||
Namespace: "",
|
||||
Exclude: false}
|
||||
// !*:Service:*
|
||||
excludeAllServiceResources = argoappv1.SyncOperationResource{
|
||||
Group: "*",
|
||||
Kind: "Service",
|
||||
Name: "*",
|
||||
Namespace: "",
|
||||
Exclude: true}
|
||||
// apps:ReplicaSet:replicaSet-name1
|
||||
includeReplicaSet1Resource = argoappv1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "replicaSet-name1",
|
||||
Namespace: "",
|
||||
Exclude: false}
|
||||
// !apps:ReplicaSet:replicaSet-name2
|
||||
excludeReplicaSet2Resource = argoappv1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "replicaSet-name2",
|
||||
Namespace: "",
|
||||
Exclude: true}
|
||||
)
|
||||
|
||||
// Filtered resources
|
||||
var (
|
||||
replicaSet1 = v1alpha1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Namespace: "default",
|
||||
Name: "replicaSet-name1",
|
||||
}
|
||||
replicaSet2 = v1alpha1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Namespace: "default",
|
||||
Name: "replicaSet-name2",
|
||||
}
|
||||
job = v1alpha1.SyncOperationResource{
|
||||
Group: "batch",
|
||||
Kind: "Job",
|
||||
Namespace: "default",
|
||||
Name: "job-name",
|
||||
}
|
||||
service1 = v1alpha1.SyncOperationResource{
|
||||
Group: "",
|
||||
Kind: "Service",
|
||||
Namespace: "default",
|
||||
Name: "service-name1",
|
||||
}
|
||||
service2 = v1alpha1.SyncOperationResource{
|
||||
Group: "",
|
||||
Kind: "Service",
|
||||
Namespace: "default",
|
||||
Name: "service-name2",
|
||||
}
|
||||
deployment = v1alpha1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
Namespace: "default",
|
||||
Name: "deployment-name",
|
||||
}
|
||||
)
|
||||
tests := []struct {
|
||||
testName string
|
||||
selectedResources []*argoappv1.SyncOperationResource
|
||||
expectedResult []*argoappv1.SyncOperationResource
|
||||
}{
|
||||
//--resource apps:ReplicaSet:replicaSet-name1 --resource *:Service:*
|
||||
{testName: "Include ReplicaSet replicaSet-name1 resouce and all service resources",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&includeAllServiceResources, &includeReplicaSet1Resource},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1, &service1, &service2},
|
||||
},
|
||||
//--resource apps:ReplicaSet:replicaSet-name1 --resource !*:Service:*
|
||||
{testName: "Include ReplicaSet replicaSet-name1 resouce and exclude all service resources",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&excludeAllServiceResources, &includeReplicaSet1Resource},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment},
|
||||
},
|
||||
// --resource !apps:ReplicaSet:replicaSet-name2 --resource !*:Service:*
|
||||
{testName: "Exclude ReplicaSet replicaSet-name2 resouce and all service resources",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&excludeReplicaSet2Resource, &excludeAllServiceResources},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment},
|
||||
},
|
||||
// --resource !apps:ReplicaSet:replicaSet-name2
|
||||
{testName: "Exclude ReplicaSet replicaSet-name2 resouce",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&excludeReplicaSet2Resource},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1, &job, &service1, &service2, &deployment},
|
||||
},
|
||||
// --resource apps:ReplicaSet:replicaSet-name1
|
||||
{testName: "Include ReplicaSet replicaSet-name1 resouce",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&includeReplicaSet1Resource},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1},
|
||||
},
|
||||
// --resource !*:Service:*
|
||||
{testName: "Exclude Service resouces",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&excludeAllServiceResources},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment},
|
||||
},
|
||||
// --resource *:Service:*
|
||||
{testName: "Include Service resouces",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&includeAllServiceResources},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&service1, &service2},
|
||||
},
|
||||
// --resource !*:*:*
|
||||
{testName: "Exclude all resouces",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&excludeAllResources},
|
||||
expectedResult: nil,
|
||||
},
|
||||
// --resource *:*:*
|
||||
{testName: "Include all resouces",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&includeAllResources},
|
||||
expectedResult: []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment},
|
||||
},
|
||||
{testName: "No Filters",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{&blankValues},
|
||||
expectedResult: nil,
|
||||
},
|
||||
{testName: "Empty Filter",
|
||||
selectedResources: []*argoappv1.SyncOperationResource{},
|
||||
expectedResult: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.testName, func(t *testing.T) {
|
||||
filteredResources := filterAppResources(&app, test.selectedResources)
|
||||
assert.Equal(t, test.expectedResult, filteredResources)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSelectedResources(t *testing.T) {
|
||||
resources := []string{"v1alpha:Application:test", "v1alpha:Application:namespace/test"}
|
||||
resources := []string{"v1alpha:Application:test",
|
||||
"v1alpha:Application:namespace/test",
|
||||
"!v1alpha:Application:test",
|
||||
"apps:Deployment:default/test",
|
||||
"!*:*:*"}
|
||||
operationResources, err := parseSelectedResources(resources)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, operationResources, 2)
|
||||
assert.Len(t, operationResources, 5)
|
||||
assert.Equal(t, *operationResources[0], v1alpha1.SyncOperationResource{
|
||||
Namespace: "",
|
||||
Name: "test",
|
||||
Kind: "Application",
|
||||
Kind: application.ApplicationKind,
|
||||
Group: "v1alpha",
|
||||
})
|
||||
assert.Equal(t, *operationResources[1], v1alpha1.SyncOperationResource{
|
||||
Namespace: "namespace",
|
||||
Name: "test",
|
||||
Kind: application.ApplicationKind,
|
||||
Group: "v1alpha",
|
||||
})
|
||||
assert.Equal(t, *operationResources[2], v1alpha1.SyncOperationResource{
|
||||
Namespace: "",
|
||||
Name: "test",
|
||||
Kind: "Application",
|
||||
Group: "v1alpha",
|
||||
Exclude: true,
|
||||
})
|
||||
assert.Equal(t, *operationResources[3], v1alpha1.SyncOperationResource{
|
||||
Namespace: "default",
|
||||
Name: "test",
|
||||
Kind: "Deployment",
|
||||
Group: "apps",
|
||||
Exclude: false,
|
||||
})
|
||||
assert.Equal(t, *operationResources[4], v1alpha1.SyncOperationResource{
|
||||
Namespace: "",
|
||||
Name: "*",
|
||||
Kind: "*",
|
||||
Group: "*",
|
||||
Exclude: true,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -343,16 +343,19 @@ func printAppSetSummaryTable(appSet *arogappsetv1.ApplicationSet) {
|
||||
fmt.Printf(printOpFmtStr, "Path:", source.Path)
|
||||
printAppSourceDetails(&source)
|
||||
|
||||
var syncPolicy string
|
||||
if appSet.Spec.SyncPolicy != nil && appSet.Spec.Template.Spec.SyncPolicy.Automated != nil {
|
||||
syncPolicy = "Automated"
|
||||
if appSet.Spec.Template.Spec.SyncPolicy.Automated.Prune {
|
||||
syncPolicy += " (Prune)"
|
||||
var (
|
||||
syncPolicyStr string
|
||||
syncPolicy = appSet.Spec.Template.Spec.SyncPolicy
|
||||
)
|
||||
if syncPolicy != nil && syncPolicy.Automated != nil {
|
||||
syncPolicyStr = "Automated"
|
||||
if syncPolicy.Automated.Prune {
|
||||
syncPolicyStr += " (Prune)"
|
||||
}
|
||||
} else {
|
||||
syncPolicy = "<none>"
|
||||
syncPolicyStr = "<none>"
|
||||
}
|
||||
fmt.Printf(printOpFmtStr, "SyncPolicy:", syncPolicy)
|
||||
fmt.Printf(printOpFmtStr, "SyncPolicy:", syncPolicyStr)
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
@@ -68,3 +70,124 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
expectation := "NAME NAMESPACE PROJECT SYNCPOLICY CONDITIONS\napp-name default nil [{ResourcesUpToDate <nil> True }]\napp-name default nil [{ResourcesUpToDate <nil> True }]\n"
|
||||
assert.Equal(t, expectation, output)
|
||||
}
|
||||
|
||||
func TestPrintAppSetSummaryTable(t *testing.T) {
|
||||
baseAppSet := &arogappsetv1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-name",
|
||||
},
|
||||
Spec: arogappsetv1.ApplicationSetSpec{
|
||||
Generators: []arogappsetv1.ApplicationSetGenerator{
|
||||
arogappsetv1.ApplicationSetGenerator{
|
||||
Git: &arogappsetv1.GitGenerator{
|
||||
RepoURL: "https://github.com/argoproj/argo-cd.git",
|
||||
Revision: "head",
|
||||
Directories: []arogappsetv1.GitDirectoryGeneratorItem{
|
||||
arogappsetv1.GitDirectoryGeneratorItem{
|
||||
Path: "applicationset/examples/git-generator-directory/cluster-addons/*",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: arogappsetv1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: arogappsetv1.ApplicationSetStatus{
|
||||
Conditions: []arogappsetv1.ApplicationSetCondition{
|
||||
arogappsetv1.ApplicationSetCondition{
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Type: arogappsetv1.ApplicationSetConditionResourcesUpToDate,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
appsetSpecSyncPolicy := baseAppSet.DeepCopy()
|
||||
appsetSpecSyncPolicy.Spec.SyncPolicy = &arogappsetv1.ApplicationSetSyncPolicy{
|
||||
PreserveResourcesOnDeletion: true,
|
||||
}
|
||||
|
||||
appSetTemplateSpecSyncPolicy := baseAppSet.DeepCopy()
|
||||
appSetTemplateSpecSyncPolicy.Spec.Template.Spec.SyncPolicy = &arogappsetv1.SyncPolicy{
|
||||
Automated: &arogappsetv1.SyncPolicyAutomated{
|
||||
SelfHeal: true,
|
||||
},
|
||||
}
|
||||
|
||||
appSetBothSyncPolicies := baseAppSet.DeepCopy()
|
||||
appSetBothSyncPolicies.Spec.SyncPolicy = &arogappsetv1.ApplicationSetSyncPolicy{
|
||||
PreserveResourcesOnDeletion: true,
|
||||
}
|
||||
appSetBothSyncPolicies.Spec.Template.Spec.SyncPolicy = &arogappsetv1.SyncPolicy{
|
||||
Automated: &arogappsetv1.SyncPolicyAutomated{
|
||||
SelfHeal: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
appSet *arogappsetv1.ApplicationSet
|
||||
expectedOutput string
|
||||
}{
|
||||
{
|
||||
name: "appset with only spec.syncPolicy set",
|
||||
appSet: appsetSpecSyncPolicy,
|
||||
expectedOutput: `Name: app-name
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: <none>
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "appset with only spec.template.spec.syncPolicy set",
|
||||
appSet: appSetTemplateSpecSyncPolicy,
|
||||
expectedOutput: `Name: app-name
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: Automated
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "appset with both spec.SyncPolicy and spec.template.spec.syncPolicy set",
|
||||
appSet: appSetBothSyncPolicies,
|
||||
expectedOutput: `Name: app-name
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: Automated
|
||||
`,
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
defer func() {
|
||||
os.Stdout = oldStdout
|
||||
}()
|
||||
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
printAppSetSummaryTable(tt.appSet)
|
||||
w.Close()
|
||||
|
||||
out, err := io.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedOutput, string(out))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,6 +27,17 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/text/label"
|
||||
)
|
||||
|
||||
const (
|
||||
// type of the cluster ID is 'name'
|
||||
clusterIdTypeName = "name"
|
||||
// cluster field is 'name'
|
||||
clusterFieldName = "name"
|
||||
// cluster field is 'namespaces'
|
||||
clusterFieldNamespaces = "namespaces"
|
||||
// indicates managing all namespaces
|
||||
allNamespaces = "*"
|
||||
)
|
||||
|
||||
// NewClusterCommand returns a new instance of an `argocd cluster` command
|
||||
func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
@@ -47,7 +58,10 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
|
||||
|
||||
# Remove a target cluster context from ArgoCD
|
||||
argocd cluster rm example-cluster
|
||||
`,
|
||||
|
||||
# Set a target cluster context from ArgoCD
|
||||
argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace '*'
|
||||
argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace namespace-one --namespace namespace-two`,
|
||||
}
|
||||
|
||||
command.AddCommand(NewClusterAddCommand(clientOpts, pathOpts))
|
||||
@@ -55,6 +69,7 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
|
||||
command.AddCommand(NewClusterListCommand(clientOpts))
|
||||
command.AddCommand(NewClusterRemoveCommand(clientOpts, pathOpts))
|
||||
command.AddCommand(NewClusterRotateAuthCommand(clientOpts))
|
||||
command.AddCommand(NewClusterSetCommand(clientOpts))
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -78,9 +93,17 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
cmdutil.PrintKubeContexts(configAccess)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if clusterOpts.InCluster && clusterOpts.ClusterEndpoint != "" {
|
||||
log.Fatal("Can only use one of --in-cluster or --cluster-endpoint")
|
||||
return
|
||||
}
|
||||
|
||||
contextName := args[0]
|
||||
conf, err := getRestConfig(pathOpts, contextName)
|
||||
errors.CheckError(err)
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
errors.CheckError(err)
|
||||
managerBearerToken := ""
|
||||
var awsAuthConf *argoappv1.AWSAuthConfig
|
||||
var execProviderConf *argoappv1.ExecProviderConfig
|
||||
@@ -99,13 +122,10 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
}
|
||||
} else {
|
||||
// Install RBAC resources for managing the cluster
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
errors.CheckError(err)
|
||||
if clusterOpts.ServiceAccount != "" {
|
||||
managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, clusterOpts.SystemNamespace, clusterOpts.ServiceAccount, common.BearerTokenTimeout)
|
||||
} else {
|
||||
isTerminal := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
|
||||
|
||||
if isTerminal && !skipConfirmation {
|
||||
accessLevel := "cluster"
|
||||
if len(clusterOpts.Namespaces) > 0 {
|
||||
@@ -132,9 +152,18 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
contextName = clusterOpts.Name
|
||||
}
|
||||
clst := cmdutil.NewCluster(contextName, clusterOpts.Namespaces, clusterOpts.ClusterResources, conf, managerBearerToken, awsAuthConf, execProviderConf, labelsMap, annotationsMap)
|
||||
if clusterOpts.InCluster {
|
||||
if clusterOpts.InClusterEndpoint() {
|
||||
clst.Server = argoappv1.KubernetesInternalAPIServerAddr
|
||||
} else if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) {
|
||||
endpoint, err := cmdutil.GetKubePublicEndpoint(clientset)
|
||||
if err != nil || len(endpoint) == 0 {
|
||||
log.Warnf("Failed to find the cluster endpoint from kube-public data: %v", err)
|
||||
log.Infof("Falling back to the endpoint '%s' as listed in the kubeconfig context", clst.Server)
|
||||
endpoint = clst.Server
|
||||
}
|
||||
clst.Server = endpoint
|
||||
}
|
||||
|
||||
if clusterOpts.Shard >= 0 {
|
||||
clst.Shard = &clusterOpts.Shard
|
||||
}
|
||||
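The cluster add hunks above make --in-cluster and the new --cluster-endpoint option mutually exclusive and add kube-public endpoint discovery; a usage sketch (the 'kube-public' value is assumed from cmdutil.KubePublicEndpoint above):

# Register a cluster using the API server address published in the kube-public ConfigMap
argocd cluster add my-context --cluster-endpoint kube-public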
@@ -185,6 +214,72 @@ func getRestConfig(pathOpts *clientcmd.PathOptions, ctxName string) (*rest.Confi
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
// NewClusterSetCommand returns a new instance of an `argocd cluster set` command
|
||||
func NewClusterSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
clusterOptions cmdutil.ClusterOptions
|
||||
clusterName string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "set NAME",
|
||||
Short: "Set cluster information",
|
||||
Example: ` # Set cluster information
|
||||
argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace '*'
|
||||
argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace namespace-one --namespace namespace-two`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
// name of the cluster whose fields have to be updated.
|
||||
clusterName = args[0]
|
||||
conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie()
|
||||
defer io.Close(conn)
|
||||
// checks the fields that need to be updated
|
||||
updatedFields := checkFieldsToUpdate(clusterOptions)
|
||||
namespaces := clusterOptions.Namespaces
|
||||
// check if all namespaces have to be considered
|
||||
if len(namespaces) == 1 && strings.EqualFold(namespaces[0], allNamespaces) {
|
||||
namespaces[0] = ""
|
||||
}
|
||||
if updatedFields != nil {
|
||||
clusterUpdateRequest := clusterpkg.ClusterUpdateRequest{
|
||||
Cluster: &argoappv1.Cluster{
|
||||
Name: clusterOptions.Name,
|
||||
Namespaces: namespaces,
|
||||
},
|
||||
UpdatedFields: updatedFields,
|
||||
Id: &clusterpkg.ClusterID{
|
||||
Type: clusterIdTypeName,
|
||||
Value: clusterName,
|
||||
},
|
||||
}
|
||||
_, err := clusterIf.Update(ctx, &clusterUpdateRequest)
|
||||
errors.CheckError(err)
|
||||
fmt.Printf("Cluster '%s' updated.\n", clusterName)
|
||||
} else {
|
||||
fmt.Print("Specify the cluster field to be updated.\n")
|
||||
}
|
||||
},
|
||||
}
|
||||
command.Flags().StringVar(&clusterOptions.Name, "name", "", "Overwrite the cluster name")
|
||||
command.Flags().StringArrayVar(&clusterOptions.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage. Specify '*' to manage all namespaces")
|
||||
return command
|
||||
}
|
||||
|
||||
// checkFieldsToUpdate returns the fields that need to be updated
|
||||
func checkFieldsToUpdate(clusterOptions cmdutil.ClusterOptions) []string {
|
||||
var updatedFields []string
|
||||
if clusterOptions.Name != "" {
|
||||
updatedFields = append(updatedFields, clusterFieldName)
|
||||
}
|
||||
if clusterOptions.Namespaces != nil {
|
||||
updatedFields = append(updatedFields, clusterFieldNamespaces)
|
||||
}
|
||||
return updatedFields
|
||||
}
|
||||
|
||||
// NewClusterGetCommand returns a new instance of an `argocd cluster get` command
|
||||
func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
|
||||
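A sketch of how checkFieldsToUpdate above limits the update request to the flags the user actually set (values illustrative):

// Only "name" is sent in UpdatedFields; the cluster's namespaces are left untouched (sketch).
fields := checkFieldsToUpdate(cmdutil.ClusterOptions{Name: "new-cluster-name"})
// fields == []string{"name"}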
@@ -146,6 +146,7 @@ __argocd_custom_func() {
|
||||
;;
|
||||
argocd_cluster_get | \
|
||||
argocd_cluster_rm | \
|
||||
argocd_cluster_set | \
|
||||
argocd_login | \
|
||||
argocd_cluster_add)
|
||||
__argocd_list_servers
|
||||
@@ -203,8 +204,13 @@ To access completions in your current shell, run
|
||||
$ source <(argocd completion bash)
|
||||
Alternatively, write it to a file and source in .bash_profile
|
||||
|
||||
For zsh, output to a file in a directory referenced by the $fpath shell
|
||||
variable.
|
||||
For zsh, add the following to your ~/.zshrc file:
|
||||
source <(argocd completion zsh)
|
||||
compdef _argocd argocd
|
||||
|
||||
Optionally, also add the following, in case you are getting errors involving compdef & compinit such as command not found: compdef:
|
||||
autoload -Uz compinit
|
||||
compinit
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) != 1 {
|
||||
|
||||
@@ -13,8 +13,8 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
@@ -38,11 +38,12 @@ import (
|
||||
)
|
||||
|
||||
type forwardCacheClient struct {
|
||||
namespace string
|
||||
context string
|
||||
init sync.Once
|
||||
client cache.CacheClient
|
||||
err error
|
||||
namespace string
|
||||
context string
|
||||
init sync.Once
|
||||
client cache.CacheClient
|
||||
compression cache.RedisCompressionType
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
|
||||
@@ -58,7 +59,7 @@ func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error)
|
||||
}
|
||||
|
||||
redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort)})
|
||||
c.client = cache.NewRedisCache(redisClient, time.Hour, cache.RedisCompressionNone)
|
||||
c.client = cache.NewRedisCache(redisClient, time.Hour, c.compression)
|
||||
})
|
||||
if c.err != nil {
|
||||
return c.err
|
||||
@@ -139,7 +140,7 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
|
||||
|
||||
// StartLocalServer allows executing command in a headless mode: on the fly starts Argo CD API server and
|
||||
// changes provided client options to use started API server port
|
||||
func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string) error {
|
||||
func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error {
|
||||
flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
|
||||
clientConfig := cli.AddKubectlFlagsToSet(flags)
|
||||
startInProcessAPI := clientOpts.Core
|
||||
@@ -200,7 +201,7 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr}), time.Hour)
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression}), time.Hour)
|
||||
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
@@ -243,7 +244,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
|
||||
ctx := c.Context()
|
||||
|
||||
ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
|
||||
err := StartLocalServer(ctx, opts, ctxStr, nil, nil)
|
||||
err := StartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
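StartLocalServer now threads the Redis compression type through to the forwarded cache; a sketch of a caller passing gzip (cache.RedisCompressionGZip is assumed to be the constant behind the 'gzip' flag value):

// Headless start with a gzip-compressed local cache (sketch, assumed constant name).
err := StartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, ctxStr, &port, &address, cache.RedisCompressionGZip)
errors.CheckError(err)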
@@ -12,7 +12,7 @@ import (
"strings"
"time"

"github.com/coreos/go-oidc"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/golang-jwt/jwt/v4"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"

@@ -4,7 +4,7 @@ import (
"fmt"
"os"

"github.com/coreos/go-oidc"
"github.com/coreos/go-oidc/v3/oidc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"


@@ -160,6 +160,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
repoOpts.Repo.GithubAppInstallationId = repoOpts.GithubAppInstallationId
repoOpts.Repo.GitHubAppEnterpriseBaseURL = repoOpts.GitHubAppEnterpriseBaseURL
repoOpts.Repo.Proxy = repoOpts.Proxy
repoOpts.Repo.ForceHttpBasicAuth = repoOpts.ForceHttpBasicAuth

if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" {
errors.CheckError(fmt.Errorf("Must specify --name for repos of type 'helm'"))
@@ -199,6 +200,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
Proxy: repoOpts.Proxy,
Project: repoOpts.Repo.Project,
GcpServiceAccountKey: repoOpts.Repo.GCPServiceAccountKey,
ForceHttpBasicAuth: repoOpts.Repo.ForceHttpBasicAuth,
}
_, err := repoIf.ValidateAccess(ctx, &repoAccessReq)
errors.CheckError(err)
@@ -309,7 +311,7 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status, must be one of: 'hard'")
return command
}

@@ -360,6 +362,6 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status")
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status, must be one of: 'hard'")
return command
}

@@ -175,6 +175,7 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
command.Flags().BoolVar(&repo.EnableOCI, "enable-oci", false, "Specifies whether helm-oci support should be enabled for this repo")
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
command.Flags().StringVar(&gcpServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
command.Flags().BoolVar(&repo.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force basic auth when connecting via HTTP")
return command
}


@@ -64,11 +64,13 @@ type AppOptions struct {
jsonnetExtVarCode []string
jsonnetLibs []string
kustomizeImages []string
kustomizeReplicas []string
kustomizeVersion string
kustomizeCommonLabels []string
kustomizeCommonAnnotations []string
kustomizeForceCommonLabels bool
kustomizeForceCommonAnnotations bool
kustomizeNamespace string
pluginEnvs []string
Validate bool
directoryExclude string
@@ -117,12 +119,14 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringArrayVar(&opts.jsonnetExtVarCode, "jsonnet-ext-var-code", []string{}, "Jsonnet ext var")
command.Flags().StringArrayVar(&opts.jsonnetLibs, "jsonnet-libs", []string{}, "Additional jsonnet libs (prefixed by repoRoot)")
command.Flags().StringArrayVar(&opts.kustomizeImages, "kustomize-image", []string{}, "Kustomize images (e.g. --kustomize-image node:8.15.0 --kustomize-image mysql=mariadb,alpine@sha256:24a0c4b4a4c0eb97a1aabb8e29f18e917d05abfe1b7a7c07857230879ce7d3d)")
command.Flags().StringArrayVar(&opts.kustomizeReplicas, "kustomize-replica", []string{}, "Kustomize replicas (e.g. --kustomize-replica my-development=2 --kustomize-replica my-statefulset=4)")
command.Flags().StringArrayVar(&opts.pluginEnvs, "plugin-env", []string{}, "Additional plugin envs")
command.Flags().BoolVar(&opts.Validate, "validate", true, "Validation of repo and cluster")
command.Flags().StringArrayVar(&opts.kustomizeCommonLabels, "kustomize-common-label", []string{}, "Set common labels in Kustomize")
command.Flags().StringArrayVar(&opts.kustomizeCommonAnnotations, "kustomize-common-annotation", []string{}, "Set common annotations in Kustomize")
command.Flags().BoolVar(&opts.kustomizeForceCommonLabels, "kustomize-force-common-label", false, "Force common labels in Kustomize")
command.Flags().BoolVar(&opts.kustomizeForceCommonAnnotations, "kustomize-force-common-annotation", false, "Force common annotations in Kustomize")
command.Flags().StringVar(&opts.kustomizeNamespace, "kustomize-namespace", "", "Kustomize namespace")
command.Flags().StringVar(&opts.directoryExclude, "directory-exclude", "", "Set glob expression used to exclude files from application source path")
command.Flags().StringVar(&opts.directoryInclude, "directory-include", "", "Set glob expression used to include files from application source path")
command.Flags().Int64Var(&opts.retryLimit, "sync-retry-limit", 0, "Max number of allowed sync retries")
@@ -218,8 +222,12 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
setKustomizeOpt(source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
case "kustomize-image":
setKustomizeOpt(source, kustomizeOpts{images: appOpts.kustomizeImages})
case "kustomize-replica":
setKustomizeOpt(source, kustomizeOpts{replicas: appOpts.kustomizeReplicas})
case "kustomize-version":
setKustomizeOpt(source, kustomizeOpts{version: appOpts.kustomizeVersion})
case "kustomize-namespace":
setKustomizeOpt(source, kustomizeOpts{namespace: appOpts.kustomizeNamespace})
case "kustomize-common-label":
parsedLabels, err := label.Parse(appOpts.kustomizeCommonLabels)
errors.CheckError(err)
@@ -328,11 +336,13 @@ type kustomizeOpts struct {
namePrefix string
nameSuffix string
images []string
replicas []string
version string
commonLabels map[string]string
commonAnnotations map[string]string
forceCommonLabels bool
forceCommonAnnotations bool
namespace string
}

func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) {
@@ -348,6 +358,9 @@ func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) {
if opts.nameSuffix != "" {
src.Kustomize.NameSuffix = opts.nameSuffix
}
if opts.namespace != "" {
src.Kustomize.Namespace = opts.namespace
}
if opts.commonLabels != nil {
src.Kustomize.CommonLabels = opts.commonLabels
}
@@ -363,6 +376,14 @@ func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) {
for _, image := range opts.images {
src.Kustomize.MergeImage(argoappv1.KustomizeImage(image))
}
for _, replica := range opts.replicas {
r, err := argoappv1.NewKustomizeReplica(replica)
if err != nil {
log.Fatal(err)
}
src.Kustomize.MergeReplica(*r)
}

if src.Kustomize.IsZero() {
src.Kustomize = nil
}

@@ -10,6 +10,8 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func Test_setHelmOpt(t *testing.T) {
|
||||
@@ -86,11 +88,32 @@ func Test_setKustomizeOpt(t *testing.T) {
|
||||
setKustomizeOpt(&src, kustomizeOpts{images: []string{"org/image:v1", "org/image:v2"}})
|
||||
assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Images: v1alpha1.KustomizeImages{v1alpha1.KustomizeImage("org/image:v2")}}, src.Kustomize)
|
||||
})
|
||||
t.Run("Replicas", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
testReplicasString := []string{"my-deployment=2", "my-statefulset=4"}
|
||||
testReplicas := v1alpha1.KustomizeReplicas{
|
||||
{
|
||||
Name: "my-deployment",
|
||||
Count: intstr.FromInt(2),
|
||||
},
|
||||
{
|
||||
Name: "my-statefulset",
|
||||
Count: intstr.FromInt(4),
|
||||
},
|
||||
}
|
||||
setKustomizeOpt(&src, kustomizeOpts{replicas: testReplicasString})
|
||||
assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Replicas: testReplicas}, src.Kustomize)
|
||||
})
|
||||
t.Run("Version", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setKustomizeOpt(&src, kustomizeOpts{version: "v0.1"})
|
||||
assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Version: "v0.1"}, src.Kustomize)
|
||||
})
|
||||
t.Run("Namespace", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setKustomizeOpt(&src, kustomizeOpts{namespace: "custom-namespace"})
|
||||
assert.Equal(t, &v1alpha1.ApplicationSourceKustomize{Namespace: "custom-namespace"}, src.Kustomize)
|
||||
})
|
||||
t.Run("Common labels", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setKustomizeOpt(&src, kustomizeOpts{commonLabels: map[string]string{"foo1": "bar1", "foo2": "bar2"}})
|
||||
@@ -191,6 +214,11 @@ func Test_setAppSpecOptions(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlag("sync-retry-limit", "0"))
|
||||
assert.Nil(t, f.spec.SyncPolicy.Retry)
|
||||
})
|
||||
t.Run("Kustomize", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlag("kustomize-replica", "my-deployment=2"))
|
||||
assert.NoError(t, f.SetFlag("kustomize-replica", "my-statefulset=4"))
|
||||
assert.Equal(t, f.spec.Source.Kustomize.Replicas, argoappv1.KustomizeReplicas{{Name: "my-deployment", Count: intstr.FromInt(2)}, {Name: "my-statefulset", Count: intstr.FromInt(4)}})
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setAnnotations(t *testing.T) {
|
||||
|
||||
@@ -61,6 +61,6 @@ func readAppset(yml []byte, appsets *[]*argoprojiov1alpha1.ApplicationSet) error
*appsets = append(*appsets, &appset)

}

return fmt.Errorf("error reading app set: %w", err)
// we reach here if there is no error found while reading the Application Set
return nil
}

@@ -1,20 +1,33 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
)
|
||||
|
||||
type ClusterEndpoint string
|
||||
|
||||
const (
|
||||
KubeConfigEndpoint ClusterEndpoint = "kubeconfig"
|
||||
KubePublicEndpoint ClusterEndpoint = "kube-public"
|
||||
KubeInternalEndpoint ClusterEndpoint = "internal"
|
||||
)
|
||||
|
||||
func PrintKubeContexts(ca clientcmd.ConfigAccess) {
|
||||
config, err := ca.GetStartingConfig()
|
||||
errors.CheckError(err)
|
||||
@@ -102,6 +115,30 @@ func NewCluster(name string, namespaces []string, clusterResources bool, conf *r
|
||||
return &clst
|
||||
}
|
||||
|
||||
// GetKubePublicEndpoint returns the kubernetes apiserver endpoint as published
|
||||
// in the kube-public.
|
||||
func GetKubePublicEndpoint(client kubernetes.Interface) (string, error) {
|
||||
clusterInfo, err := client.CoreV1().ConfigMaps("kube-public").Get(context.TODO(), "cluster-info", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
kubeconfig, ok := clusterInfo.Data["kubeconfig"]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("cluster-info does not contain a public kubeconfig")
|
||||
}
|
||||
// Parse Kubeconfig and get server address
|
||||
config := &clientcmdapiv1.Config{}
|
||||
err = yaml.Unmarshal([]byte(kubeconfig), config)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse cluster-info kubeconfig: %v", err)
|
||||
}
|
||||
if len(config.Clusters) == 0 {
|
||||
return "", fmt.Errorf("cluster-info kubeconfig does not have any clusters")
|
||||
}
|
||||
|
||||
return config.Clusters[0].Cluster.Server, nil
|
||||
}
|
||||
|
||||
type ClusterOptions struct {
|
||||
InCluster bool
|
||||
Upsert bool
|
||||
@@ -119,6 +156,13 @@ type ClusterOptions struct {
|
||||
ExecProviderEnv map[string]string
|
||||
ExecProviderAPIVersion string
|
||||
ExecProviderInstallHint string
|
||||
ClusterEndpoint string
|
||||
}
|
||||
|
||||
// InClusterEndpoint returns true if ArgoCD should reference the in-cluster
|
||||
// endpoint when registering the target cluster.
|
||||
func (o ClusterOptions) InClusterEndpoint() bool {
|
||||
return o.InCluster || o.ClusterEndpoint == string(KubeInternalEndpoint)
|
||||
}
|
||||
|
||||
func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) {
|
||||
@@ -135,4 +179,5 @@ func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) {
|
||||
command.Flags().StringToStringVar(&opts.ExecProviderEnv, "exec-command-env", nil, "Environment vars to set when running the --exec-command executable")
|
||||
command.Flags().StringVar(&opts.ExecProviderAPIVersion, "exec-command-api-version", "", "Preferred input version of the ExecInfo for the --exec-command executable")
|
||||
command.Flags().StringVar(&opts.ExecProviderInstallHint, "exec-command-install-hint", "", "Text shown to the user when the --exec-command executable doesn't seem to be present")
|
||||
command.Flags().StringVar(&opts.ClusterEndpoint, "cluster-endpoint", "", "Cluster endpoint to use. Can be one of the following: 'kubeconfig', 'kube-public', or 'internal'.")
|
||||
}
|
||||
|
||||
@@ -4,8 +4,14 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/rest"
|
||||
clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -69,3 +75,109 @@ func Test_newCluster(t *testing.T) {
|
||||
assert.Nil(t, clusterWithBearerToken.Labels)
|
||||
assert.Nil(t, clusterWithBearerToken.Annotations)
|
||||
}
|
||||
|
||||
func TestGetKubePublicEndpoint(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
clusterInfo *corev1.ConfigMap
|
||||
expectedEndpoint string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "has public endpoint",
|
||||
clusterInfo: &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "kube-public",
|
||||
Name: "cluster-info",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"kubeconfig": kubeconfigFixture("https://test-cluster:6443"),
|
||||
},
|
||||
},
|
||||
expectedEndpoint: "https://test-cluster:6443",
|
||||
},
|
||||
{
|
||||
name: "no cluster-info",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "no kubeconfig in cluster-info",
|
||||
clusterInfo: &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "kube-public",
|
||||
Name: "cluster-info",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"argo": "the project, not the movie",
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "no clusters in cluster-info kubeconfig",
|
||||
clusterInfo: &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "kube-public",
|
||||
Name: "cluster-info",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"kubeconfig": kubeconfigFixture(""),
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "can't parse kubeconfig",
|
||||
clusterInfo: &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "kube-public",
|
||||
Name: "cluster-info",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"kubeconfig": "this is not valid YAML",
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
objects := []runtime.Object{}
|
||||
if tc.clusterInfo != nil {
|
||||
objects = append(objects, tc.clusterInfo)
|
||||
}
|
||||
clientset := fake.NewSimpleClientset(objects...)
|
||||
endpoint, err := GetKubePublicEndpoint(clientset)
|
||||
if err != nil && !tc.expectError {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if err == nil && tc.expectError {
|
||||
t.Error("expected error to be returned, received none")
|
||||
}
|
||||
if endpoint != tc.expectedEndpoint {
|
||||
t.Errorf("expected endpoint %s, got %s", tc.expectedEndpoint, endpoint)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func kubeconfigFixture(endpoint string) string {
|
||||
kubeconfig := &clientcmdapiv1.Config{}
|
||||
if len(endpoint) > 0 {
|
||||
kubeconfig.Clusters = []clientcmdapiv1.NamedCluster{
|
||||
{
|
||||
Name: "test-kube",
|
||||
Cluster: clientcmdapiv1.Cluster{
|
||||
Server: endpoint,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
configYAML, err := yaml.Marshal(kubeconfig)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(configYAML)
|
||||
}
|
||||
|
||||
@@ -138,7 +138,10 @@ func readProjFromURI(fileURL string, proj *v1alpha1.AppProject) error {
} else {
err = config.UnmarshalRemoteFile(fileURL, &proj)
}
return fmt.Errorf("error reading proj from uri: %w", err)
if err != nil {
return fmt.Errorf("error reading proj from uri: %w", err)
}
return nil
}

func SetProjSpecOptions(flags *pflag.FlagSet, spec *v1alpha1.AppProjectSpec, projOpts *ProjectOpts) int {

@@ -23,6 +23,7 @@ type RepoOptions struct {
GitHubAppEnterpriseBaseURL string
Proxy string
GCPServiceAccountKeyPath string
ForceHttpBasicAuth bool
}

func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
@@ -44,4 +45,5 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3)")
command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository")
command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting to the repository via HTTP")
}

@@ -318,6 +318,7 @@ func (m *ManifestResponse) GetSourceType() string {
|
||||
|
||||
type RepositoryResponse struct {
|
||||
IsSupported bool `protobuf:"varint,1,opt,name=isSupported,proto3" json:"isSupported,omitempty"`
|
||||
IsDiscoveryEnabled bool `protobuf:"varint,2,opt,name=isDiscoveryEnabled,proto3" json:"isDiscoveryEnabled,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
@@ -363,6 +364,13 @@ func (m *RepositoryResponse) GetIsSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RepositoryResponse) GetIsDiscoveryEnabled() bool {
|
||||
if m != nil {
|
||||
return m.IsDiscoveryEnabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP
|
||||
// is able to accept.
|
||||
type ParametersAnnouncementResponse struct {
|
||||
@@ -472,42 +480,43 @@ func init() {
|
||||
func init() { proto.RegisterFile("cmpserver/plugin/plugin.proto", fileDescriptor_b21875a7079a06ed) }
|
||||
|
||||
var fileDescriptor_b21875a7079a06ed = []byte{
|
||||
// 558 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xc1, 0x6e, 0xd3, 0x4c,
|
||||
0x10, 0xae, 0x9b, 0xb4, 0x4d, 0x26, 0x95, 0xfe, 0x68, 0xf5, 0x0b, 0x4c, 0xd4, 0x86, 0xe0, 0x03,
|
||||
0xca, 0x85, 0x44, 0x32, 0x88, 0x1b, 0x12, 0x2d, 0x2a, 0xad, 0x40, 0x41, 0xd1, 0x96, 0x0b, 0xdc,
|
||||
0xb6, 0xce, 0x24, 0x59, 0x6a, 0xef, 0x2e, 0xeb, 0xb5, 0xa5, 0xc0, 0x85, 0xf7, 0xe0, 0x01, 0x78,
|
||||
0x15, 0x8e, 0x3c, 0x02, 0xca, 0x93, 0x20, 0xaf, 0xed, 0xd8, 0xa2, 0x6d, 0x38, 0x79, 0xe6, 0x9b,
|
||||
0x99, 0x6f, 0xbf, 0x9d, 0x99, 0x35, 0x1c, 0x07, 0x91, 0x8a, 0x51, 0xa7, 0xa8, 0xc7, 0x2a, 0x4c,
|
||||
0x16, 0x5c, 0x14, 0x9f, 0x91, 0xd2, 0xd2, 0x48, 0xb2, 0x9f, 0x7b, 0xbd, 0xb3, 0x05, 0x37, 0xcb,
|
||||
0xe4, 0x6a, 0x14, 0xc8, 0x68, 0xcc, 0xf4, 0x42, 0x2a, 0x2d, 0x3f, 0x59, 0xe3, 0x49, 0x30, 0x1b,
|
||||
0xa7, 0xfe, 0x58, 0xa3, 0x92, 0x05, 0x8d, 0x35, 0xb9, 0x91, 0x7a, 0x55, 0x33, 0x73, 0x3a, 0xef,
|
||||
0x9b, 0x03, 0xdd, 0x13, 0xa5, 0x2e, 0x8d, 0x46, 0x16, 0x51, 0xfc, 0x9c, 0x60, 0x6c, 0xc8, 0x0b,
|
||||
0x68, 0x45, 0x68, 0xd8, 0x8c, 0x19, 0xe6, 0x3a, 0x03, 0x67, 0xd8, 0xf1, 0x1f, 0x8e, 0x0a, 0x11,
|
||||
0x13, 0x26, 0xf8, 0x1c, 0x63, 0x53, 0xa4, 0x4e, 0x8a, 0xb4, 0x8b, 0x1d, 0xba, 0x29, 0x21, 0x1e,
|
||||
0x34, 0xe7, 0x3c, 0x44, 0x77, 0xd7, 0x96, 0x1e, 0x96, 0xa5, 0xaf, 0x79, 0x88, 0x17, 0x3b, 0xd4,
|
||||
0xc6, 0x4e, 0xdb, 0x70, 0xa0, 0x73, 0x0a, 0xef, 0x87, 0x03, 0xf7, 0xef, 0xa0, 0x25, 0x2e, 0x1c,
|
||||
0x30, 0xa5, 0xde, 0xb1, 0x08, 0xad, 0x90, 0x36, 0x2d, 0x5d, 0xd2, 0x07, 0x60, 0x4a, 0x51, 0x0c,
|
||||
0xa7, 0xcc, 0x2c, 0xed, 0x51, 0x6d, 0x5a, 0x43, 0x48, 0x0f, 0x5a, 0xc1, 0x12, 0x83, 0xeb, 0x38,
|
||||
0x89, 0xdc, 0x86, 0x8d, 0x6e, 0x7c, 0x42, 0xa0, 0x19, 0xf3, 0x2f, 0xe8, 0x36, 0x07, 0xce, 0xb0,
|
||||
0x41, 0xad, 0x4d, 0x3c, 0x68, 0xa0, 0x48, 0xdd, 0xbd, 0x41, 0x63, 0xd8, 0xf1, 0xbb, 0xa5, 0xe6,
|
||||
0x33, 0x91, 0x9e, 0x09, 0xa3, 0x57, 0x34, 0x0b, 0x7a, 0xcf, 0xa0, 0x55, 0x02, 0x19, 0x87, 0xa8,
|
||||
0x64, 0x59, 0x9b, 0xfc, 0x0f, 0x7b, 0x29, 0x0b, 0x13, 0x2c, 0xe4, 0xe4, 0x8e, 0x37, 0x85, 0x6e,
|
||||
0x75, 0xbd, 0x58, 0x49, 0x11, 0x23, 0x39, 0x82, 0x76, 0x54, 0x60, 0xb1, 0xeb, 0x0c, 0x1a, 0xc3,
|
||||
0x36, 0xad, 0x80, 0xec, 0x6e, 0xb1, 0x4c, 0x74, 0x80, 0xef, 0x57, 0xaa, 0x24, 0xab, 0x21, 0xde,
|
||||
0x73, 0x20, 0x74, 0x33, 0xc8, 0x0d, 0xe7, 0x00, 0x3a, 0x3c, 0xbe, 0x4c, 0x94, 0x92, 0xda, 0xe0,
|
||||
0xcc, 0x0a, 0x6b, 0xd1, 0x3a, 0xe4, 0x7d, 0x85, 0xfe, 0x94, 0x69, 0x16, 0xa1, 0x41, 0x1d, 0x9f,
|
||||
0x08, 0x21, 0x13, 0x11, 0x60, 0x84, 0xa2, 0xd2, 0xf5, 0x01, 0xee, 0xa9, 0x32, 0xa3, 0x9e, 0x90,
|
||||
0x8b, 0xec, 0xf8, 0x8f, 0x46, 0xb5, 0x0d, 0x9a, 0xde, 0x96, 0x49, 0xef, 0x20, 0xf0, 0x8e, 0xa0,
|
||||
0x99, 0x6d, 0x40, 0xd6, 0xa4, 0x60, 0x99, 0x88, 0x6b, 0x2b, 0xf0, 0x90, 0xe6, 0x8e, 0xff, 0x7d,
|
||||
0x17, 0x8e, 0x5f, 0x49, 0x31, 0xe7, 0x8b, 0x09, 0x13, 0x6c, 0x61, 0x6b, 0xa6, 0x76, 0x06, 0x97,
|
||||
0xa8, 0x53, 0x1e, 0x20, 0x79, 0x03, 0xdd, 0x73, 0x14, 0xa8, 0x99, 0xc1, 0xb2, 0x9d, 0xc4, 0x2d,
|
||||
0xe7, 0xf4, 0xf7, 0x0a, 0xf7, 0xdc, 0x9b, 0x0b, 0x9b, 0x5f, 0xd1, 0xdb, 0x19, 0x3a, 0xe4, 0x2d,
|
||||
0xfc, 0x37, 0x61, 0x26, 0x58, 0x56, 0x5d, 0xdc, 0x42, 0xd5, 0x2b, 0x23, 0x37, 0x7b, 0x6e, 0xc9,
|
||||
0x18, 0x3c, 0x38, 0x47, 0x73, 0x7b, 0x63, 0xb7, 0xd0, 0x3e, 0x2e, 0x23, 0xdb, 0x47, 0x92, 0x1d,
|
||||
0x71, 0xfa, 0xf2, 0xe7, 0xba, 0xef, 0xfc, 0x5a, 0xf7, 0x9d, 0xdf, 0xeb, 0xbe, 0xf3, 0xd1, 0xff,
|
||||
0xc7, 0xd3, 0xaf, 0x7e, 0x20, 0x4c, 0xf1, 0x20, 0xe4, 0x28, 0xcc, 0xd5, 0xbe, 0x7d, 0xee, 0x4f,
|
||||
0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x33, 0x34, 0xb3, 0x95, 0x5e, 0x04, 0x00, 0x00,
|
||||
// 576 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xdd, 0x6e, 0x12, 0x4f,
|
||||
0x14, 0xc0, 0xbb, 0x85, 0xb6, 0x70, 0x68, 0xf2, 0x27, 0x93, 0x7f, 0x74, 0x25, 0x2d, 0xe2, 0x5e,
|
||||
0x18, 0x6e, 0x84, 0x04, 0xbd, 0x35, 0xb1, 0x55, 0x6c, 0xa3, 0xc1, 0x90, 0xa9, 0x37, 0x7a, 0x37,
|
||||
0x1d, 0x0e, 0x30, 0x76, 0x77, 0x66, 0x9c, 0x99, 0xdd, 0x04, 0xbd, 0xf1, 0x3d, 0x7c, 0x00, 0x5f,
|
||||
0xc5, 0x4b, 0x1f, 0xc1, 0xf4, 0x49, 0x0c, 0xb3, 0xbb, 0x40, 0x6c, 0x8b, 0x57, 0x7b, 0x3e, 0x7f,
|
||||
0x7b, 0xbe, 0x32, 0x70, 0xcc, 0x13, 0x6d, 0xd1, 0x64, 0x68, 0xfa, 0x3a, 0x4e, 0x67, 0x42, 0x16,
|
||||
0x9f, 0x9e, 0x36, 0xca, 0x29, 0xb2, 0x9f, 0x6b, 0xad, 0xe1, 0x4c, 0xb8, 0x79, 0x7a, 0xd9, 0xe3,
|
||||
0x2a, 0xe9, 0x33, 0x33, 0x53, 0xda, 0xa8, 0x4f, 0x5e, 0x78, 0xc2, 0x27, 0xfd, 0x6c, 0xd0, 0x37,
|
||||
0xa8, 0x55, 0x81, 0xf1, 0xa2, 0x70, 0xca, 0x2c, 0x36, 0xc4, 0x1c, 0x17, 0x7d, 0x0b, 0xa0, 0x79,
|
||||
0xa2, 0xf5, 0x85, 0x33, 0xc8, 0x12, 0x8a, 0x9f, 0x53, 0xb4, 0x8e, 0x3c, 0x87, 0x5a, 0x82, 0x8e,
|
||||
0x4d, 0x98, 0x63, 0x61, 0xd0, 0x09, 0xba, 0x8d, 0xc1, 0xc3, 0x5e, 0x51, 0xc4, 0x88, 0x49, 0x31,
|
||||
0x45, 0xeb, 0x8a, 0xd0, 0x51, 0x11, 0x76, 0xbe, 0x43, 0x57, 0x29, 0x24, 0x82, 0xea, 0x54, 0xc4,
|
||||
0x18, 0xee, 0xfa, 0xd4, 0xc3, 0x32, 0xf5, 0xb5, 0x88, 0xf1, 0x7c, 0x87, 0x7a, 0xdf, 0x69, 0x1d,
|
||||
0x0e, 0x4c, 0x8e, 0x88, 0x7e, 0x04, 0x70, 0xff, 0x0e, 0x2c, 0x09, 0xe1, 0x80, 0x69, 0xfd, 0x8e,
|
||||
0x25, 0xe8, 0x0b, 0xa9, 0xd3, 0x52, 0x25, 0x6d, 0x00, 0xa6, 0x35, 0xc5, 0x78, 0xcc, 0xdc, 0xdc,
|
||||
0xff, 0xaa, 0x4e, 0x37, 0x2c, 0xa4, 0x05, 0x35, 0x3e, 0x47, 0x7e, 0x65, 0xd3, 0x24, 0xac, 0x78,
|
||||
0xef, 0x4a, 0x27, 0x04, 0xaa, 0x56, 0x7c, 0xc1, 0xb0, 0xda, 0x09, 0xba, 0x15, 0xea, 0x65, 0x12,
|
||||
0x41, 0x05, 0x65, 0x16, 0xee, 0x75, 0x2a, 0xdd, 0xc6, 0xa0, 0x59, 0xd6, 0x3c, 0x94, 0xd9, 0x50,
|
||||
0x3a, 0xb3, 0xa0, 0x4b, 0x67, 0xf4, 0x0c, 0x6a, 0xa5, 0x61, 0xc9, 0x90, 0xeb, 0xb2, 0xbc, 0x4c,
|
||||
0xfe, 0x87, 0xbd, 0x8c, 0xc5, 0x29, 0x16, 0xe5, 0xe4, 0x4a, 0x34, 0x86, 0xe6, 0xba, 0x3d, 0xab,
|
||||
0x95, 0xb4, 0x48, 0x8e, 0xa0, 0x9e, 0x14, 0x36, 0x1b, 0x06, 0x9d, 0x4a, 0xb7, 0x4e, 0xd7, 0x86,
|
||||
0x65, 0x6f, 0x56, 0xa5, 0x86, 0xe3, 0xfb, 0x85, 0x2e, 0x61, 0x1b, 0x96, 0x68, 0x0a, 0x84, 0xae,
|
||||
0x16, 0xb9, 0x62, 0x76, 0xa0, 0x21, 0xec, 0x45, 0xaa, 0xb5, 0x32, 0x0e, 0x27, 0xbe, 0xb0, 0x1a,
|
||||
0xdd, 0x34, 0x91, 0x1e, 0x10, 0x61, 0x5f, 0x09, 0xcb, 0x55, 0x86, 0x66, 0x31, 0x94, 0xec, 0x32,
|
||||
0xc6, 0x89, 0xe7, 0xd7, 0xe8, 0x2d, 0x9e, 0xe8, 0x2b, 0xb4, 0xc7, 0xcc, 0xb0, 0x04, 0x1d, 0x1a,
|
||||
0x7b, 0x22, 0xa5, 0x4a, 0x25, 0xc7, 0x04, 0xe5, 0xba, 0x8f, 0x0f, 0x70, 0x4f, 0x97, 0x11, 0x9b,
|
||||
0x01, 0x79, 0x53, 0x8d, 0xc1, 0xa3, 0xde, 0xc6, 0xc5, 0x8d, 0x6f, 0x8b, 0xa4, 0x77, 0x00, 0xa2,
|
||||
0x23, 0xa8, 0x2e, 0x2f, 0x66, 0x39, 0x54, 0x3e, 0x4f, 0xe5, 0x95, 0x6f, 0xe8, 0x90, 0xe6, 0xca,
|
||||
0xe0, 0xfb, 0x2e, 0x1c, 0xbf, 0x54, 0x72, 0x2a, 0x66, 0x23, 0x26, 0xd9, 0xcc, 0xe7, 0x8c, 0xfd,
|
||||
0xce, 0x2e, 0xd0, 0x64, 0x82, 0x23, 0x79, 0x03, 0xcd, 0x33, 0x94, 0x68, 0x98, 0xc3, 0x72, 0xfc,
|
||||
0x24, 0x2c, 0xf7, 0xfa, 0xf7, 0xc9, 0xb7, 0xc2, 0x9b, 0x07, 0x9e, 0xb7, 0x18, 0xed, 0x74, 0x03,
|
||||
0xf2, 0x16, 0xfe, 0x1b, 0x31, 0xc7, 0xe7, 0xeb, 0xa9, 0x6f, 0x41, 0xb5, 0x4a, 0xcf, 0xcd, 0x1d,
|
||||
0x79, 0x18, 0x83, 0x07, 0x67, 0xe8, 0x6e, 0x1f, 0xec, 0x16, 0xec, 0xe3, 0xd2, 0xb3, 0x7d, 0x25,
|
||||
0xcb, 0x5f, 0x9c, 0xbe, 0xf8, 0x79, 0xdd, 0x0e, 0x7e, 0x5d, 0xb7, 0x83, 0xdf, 0xd7, 0xed, 0xe0,
|
||||
0xe3, 0xe0, 0x1f, 0x4f, 0xc5, 0xfa, 0xc1, 0x61, 0x5a, 0xf0, 0x58, 0xa0, 0x74, 0x97, 0xfb, 0xfe,
|
||||
0x79, 0x78, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x23, 0x88, 0x8e, 0xd3, 0x8e, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -1025,6 +1034,16 @@ func (m *RepositoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.IsDiscoveryEnabled {
|
||||
i--
|
||||
if m.IsDiscoveryEnabled {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.IsSupported {
|
||||
i--
|
||||
if m.IsSupported {
|
||||
@@ -1247,6 +1266,9 @@ func (m *RepositoryResponse) Size() (n int) {
|
||||
if m.IsSupported {
|
||||
n += 2
|
||||
}
|
||||
if m.IsDiscoveryEnabled {
|
||||
n += 2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
@@ -1893,6 +1915,26 @@ func (m *RepositoryResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
}
|
||||
m.IsSupported = bool(v != 0)
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field IsDiscoveryEnabled", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.IsDiscoveryEnabled = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
|
||||
@@ -27,14 +27,19 @@ type PluginConfigSpec struct {
Generate Command `json:"generate"`
Discover Discover `json:"discover"`
Parameters Parameters `yaml:"parameters"`
PreserveFileMode bool `json:"preserveFileMode,omitempty"`
}

//Discover holds find and fileName
// Discover holds find and fileName
type Discover struct {
Find Find `json:"find"`
FileName string `json:"fileName"`
}

func (d Discover) IsDefined() bool {
return d.FileName != "" || d.Find.Glob != "" || len(d.Find.Command.Command) > 0
}

// Command holds binary path and arguments list
type Command struct {
Command []string `json:"command,omitempty"`
@@ -84,9 +89,7 @@ func ValidatePluginConfig(config PluginConfig) error {
if len(config.Spec.Generate.Command) == 0 {
return fmt.Errorf("invalid plugin configuration file. spec.generate command should be non-empty")
}
if config.Spec.Discover.Find.Glob == "" && len(config.Spec.Discover.Find.Command.Command) == 0 && config.Spec.Discover.FileName == "" {
return fmt.Errorf("invalid plugin configuration file. atleast one of discover.find.command or discover.find.glob or discover.fineName should be non-empty")
}
// discovery field is optional as apps can now specify plugin names directly
return nil
}


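The change above makes the discovery section optional: ValidatePluginConfig no longer rejects configs without discover rules, and Discover.IsDefined reports whether any rule is set. A small illustrative sketch of that predicate follows, using trimmed stand-in types that mirror the fields shown above (the real types live inside the cmp-server plugin package and are not importable as-is).

```go
package main

import "fmt"

// Trimmed stand-ins for the CMP plugin config types shown in the hunk above.
type Command struct {
	Command []string
	Args    []string
}

type Find struct {
	Command Command
	Glob    string
}

type Discover struct {
	Find     Find
	FileName string
}

// IsDefined mirrors the method added above: discovery is considered configured
// if any of fileName, find.glob, or find.command is set.
func (d Discover) IsDefined() bool {
	return d.FileName != "" || d.Find.Glob != "" || len(d.Find.Command.Command) > 0
}

func main() {
	fmt.Println(Discover{}.IsDefined())                               // false: apps must name the plugin explicitly
	fmt.Println(Discover{FileName: "kustomization.yaml"}.IsDefined()) // true: file-based discovery
}
```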
@@ -9,8 +9,10 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/argoproj/pkg/rand"
|
||||
|
||||
@@ -73,9 +75,8 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"execID": execId})
|
||||
|
||||
// log in a way we can copy-and-paste into a terminal
|
||||
args := strings.Join(cmd.Args, " ")
|
||||
logCtx.WithFields(log.Fields{"dir": cmd.Dir}).Info(args)
|
||||
argsToLog := getCommandArgsToLog(cmd)
|
||||
logCtx.WithFields(log.Fields{"dir": cmd.Dir}).Info(argsToLog)
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
@@ -106,14 +107,42 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
|
||||
logCtx.WithFields(log.Fields{"duration": duration}).Debug(output)
|
||||
|
||||
if err != nil {
|
||||
err := newCmdError(args, errors.New(err.Error()), strings.TrimSpace(stderr.String()))
|
||||
err := newCmdError(argsToLog, errors.New(err.Error()), strings.TrimSpace(stderr.String()))
|
||||
logCtx.Error(err.Error())
|
||||
return strings.TrimSuffix(output, "\n"), err
|
||||
}
|
||||
if len(output) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"stderr": stderr.String(),
|
||||
"command": command,
|
||||
}).Warn("Plugin command returned zero output")
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(output, "\n"), nil
|
||||
}
|
||||
|
||||
// getCommandArgsToLog represents the given command in a way that we can copy-and-paste into a terminal
|
||||
func getCommandArgsToLog(cmd *exec.Cmd) string {
|
||||
var argsToLog []string
|
||||
for _, arg := range cmd.Args {
|
||||
containsSpace := false
|
||||
for _, r := range arg {
|
||||
if unicode.IsSpace(r) {
|
||||
containsSpace = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if containsSpace {
|
||||
// add quotes and escape any internal quotes
|
||||
argsToLog = append(argsToLog, strconv.Quote(arg))
|
||||
} else {
|
||||
argsToLog = append(argsToLog, arg)
|
||||
}
|
||||
}
|
||||
args := strings.Join(argsToLog, " ")
|
||||
return args
|
||||
}
|
||||
|
||||
type CmdError struct {
|
||||
Args string
|
||||
Stderr string
|
||||
@@ -184,7 +213,7 @@ func (s *Service) generateManifestGeneric(stream GenerateManifestStream) error {
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(ctx, stream, workDir)
|
||||
metadata, err := cmp.ReceiveRepoStream(ctx, stream, workDir, s.initConstants.PluginConfig.Spec.PreserveFileMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generate manifest error receiving stream: %w", err)
|
||||
}
|
||||
@@ -268,16 +297,16 @@ func (s *Service) matchRepositoryGeneric(stream MatchRepositoryStream) error {
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir)
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir, s.initConstants.PluginConfig.Spec.PreserveFileMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("match repository error receiving stream: %w", err)
|
||||
}
|
||||
|
||||
isSupported, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv())
|
||||
isSupported, isDiscoveryEnabled, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv())
|
||||
if err != nil {
|
||||
return fmt.Errorf("match repository error: %w", err)
|
||||
}
|
||||
repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported}
|
||||
repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported, IsDiscoveryEnabled: isDiscoveryEnabled}
|
||||
|
||||
err = stream.SendAndClose(repoResponse)
|
||||
if err != nil {
|
||||
@@ -286,8 +315,9 @@ func (s *Service) matchRepositoryGeneric(stream MatchRepositoryStream) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry) (bool, error) {
|
||||
func (s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry) (isSupported bool, isDiscoveryEnabled bool, err error) {
|
||||
config := s.initConstants.PluginConfig
|
||||
|
||||
if config.Spec.Discover.FileName != "" {
|
||||
log.Debugf("config.Spec.Discover.FileName is provided")
|
||||
pattern := filepath.Join(workdir, config.Spec.Discover.FileName)
|
||||
@@ -295,9 +325,9 @@ func (s *Service) matchRepository(ctx context.Context, workdir string, envEntrie
|
||||
if err != nil {
|
||||
e := fmt.Errorf("error finding filename match for pattern %q: %w", pattern, err)
|
||||
log.Debug(e)
|
||||
return false, e
|
||||
return false, true, e
|
||||
}
|
||||
return len(matches) > 0, nil
|
||||
return len(matches) > 0, true, nil
|
||||
}
|
||||
|
||||
if config.Spec.Discover.Find.Glob != "" {
|
||||
@@ -309,27 +339,23 @@ func (s *Service) matchRepository(ctx context.Context, workdir string, envEntrie
|
||||
if err != nil {
|
||||
e := fmt.Errorf("error finding glob match for pattern %q: %w", pattern, err)
|
||||
log.Debug(e)
|
||||
return false, e
|
||||
return false, true, e
|
||||
}
|
||||
|
||||
if len(matches) > 0 {
|
||||
return true, nil
|
||||
return len(matches) > 0, true, nil
|
||||
}
|
||||
|
||||
if len(config.Spec.Discover.Find.Command.Command) > 0 {
|
||||
log.Debugf("Going to try runCommand.")
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
find, err := runCommand(ctx, config.Spec.Discover.Find.Command, workdir, env)
|
||||
if err != nil {
|
||||
return false, true, fmt.Errorf("error running find command: %w", err)
|
||||
}
|
||||
return false, nil
|
||||
return find != "", true, nil
|
||||
}
|
||||
|
||||
log.Debugf("Going to try runCommand.")
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
|
||||
find, err := runCommand(ctx, config.Spec.Discover.Find.Command, workdir, env)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error running find command: %w", err)
|
||||
}
|
||||
|
||||
if find != "" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// ParametersAnnouncementStream defines an interface able to send/receive a stream of parameter announcements.
|
||||
@@ -349,7 +375,7 @@ func (s *Service) GetParametersAnnouncement(stream apiclient.ConfigManagementPlu
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir)
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir, s.initConstants.PluginConfig.Spec.PreserveFileMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parameters announcement error receiving stream: %w", err)
|
||||
}
|
||||
@@ -358,7 +384,7 @@ func (s *Service) GetParametersAnnouncement(stream apiclient.ConfigManagementPlu
|
||||
return fmt.Errorf("illegal appPath: out of workDir bound")
|
||||
}
|
||||
|
||||
repoResponse, err := getParametersAnnouncement(bufferedCtx, appPath, s.initConstants.PluginConfig.Spec.Parameters.Static, s.initConstants.PluginConfig.Spec.Parameters.Dynamic)
|
||||
repoResponse, err := getParametersAnnouncement(bufferedCtx, appPath, s.initConstants.PluginConfig.Spec.Parameters.Static, s.initConstants.PluginConfig.Spec.Parameters.Dynamic, metadata.GetEnv())
|
||||
if err != nil {
|
||||
return fmt.Errorf("get parameters announcement error: %w", err)
|
||||
}
|
||||
@@ -370,11 +396,12 @@ func (s *Service) GetParametersAnnouncement(stream apiclient.ConfigManagementPlu
|
||||
return nil
|
||||
}
|
||||
|
||||
func getParametersAnnouncement(ctx context.Context, appDir string, announcements []*repoclient.ParameterAnnouncement, command Command) (*apiclient.ParametersAnnouncementResponse, error) {
|
||||
func getParametersAnnouncement(ctx context.Context, appDir string, announcements []*repoclient.ParameterAnnouncement, command Command, envEntries []*apiclient.EnvEntry) (*apiclient.ParametersAnnouncementResponse, error) {
|
||||
augmentedAnnouncements := announcements
|
||||
|
||||
if len(command.Command) > 0 {
|
||||
stdout, err := runCommand(ctx, command, appDir, os.Environ())
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
stdout, err := runCommand(ctx, command, appDir, env)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error executing dynamic parameter output command: %w", err)
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ message ManifestResponse {

message RepositoryResponse {
bool isSupported = 1;
bool isDiscoveryEnabled = 2;
}

// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP

@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -99,11 +100,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by filename if file not found", func(t *testing.T) {
|
||||
// given
|
||||
@@ -113,11 +115,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match a pattern with a syntax error", func(t *testing.T) {
|
||||
// given
|
||||
@@ -127,7 +130,7 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
_, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
_, _, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.ErrorContains(t, err, "syntax error")
|
||||
@@ -142,11 +145,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by glob if not found", func(t *testing.T) {
|
||||
// given
|
||||
@@ -158,11 +162,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will throw an error for a bad pattern", func(t *testing.T) {
|
||||
// given
|
||||
@@ -174,7 +179,7 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
_, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
_, _, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.ErrorContains(t, err, "error finding glob match for pattern")
|
||||
@@ -191,11 +196,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by command when returns no output", func(t *testing.T) {
|
||||
// given
|
||||
@@ -209,11 +215,11 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will match plugin because env var defined", func(t *testing.T) {
|
||||
// given
|
||||
@@ -227,11 +233,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin because no env var defined", func(t *testing.T) {
|
||||
// given
|
||||
@@ -246,11 +253,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by command when command fails", func(t *testing.T) {
|
||||
// given
|
||||
@@ -264,11 +272,25 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin as discovery is not set", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.False(t, discovery)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -359,7 +381,7 @@ func Test_getParametersAnnouncement_empty_command(t *testing.T) {
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[]`},
|
||||
}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command, []*apiclient.EnvEntry{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements)
|
||||
}
|
||||
@@ -373,7 +395,7 @@ func Test_getParametersAnnouncement_no_command(t *testing.T) {
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command, []*apiclient.EnvEntry{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements)
|
||||
}
|
||||
@@ -390,7 +412,7 @@ func Test_getParametersAnnouncement_static_and_dynamic(t *testing.T) {
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[{"name": "dynamic-a"}, {"name": "dynamic-b"}]`},
|
||||
}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command, []*apiclient.EnvEntry{})
|
||||
require.NoError(t, err)
|
||||
expected := []*repoclient.ParameterAnnouncement{
|
||||
{Name: "dynamic-a"},
|
||||
@@ -406,7 +428,7 @@ func Test_getParametersAnnouncement_invalid_json(t *testing.T) {
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[`},
|
||||
}
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command)
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unexpected end of JSON input")
|
||||
}
|
||||
@@ -416,7 +438,7 @@ func Test_getParametersAnnouncement_bad_command(t *testing.T) {
|
||||
Command: []string{"exit"},
|
||||
Args: []string{"1"},
|
||||
}
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command)
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{})
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "error executing dynamic parameter output command")
|
||||
}
|
||||
@@ -708,16 +730,17 @@ func TestService_GetParametersAnnouncement(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("successful response", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", []string{"MUST_BE_SET=yep"})
|
||||
require.NoError(t, err)
|
||||
err = service.GetParametersAnnouncement(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
require.Len(t, s.response.ParameterAnnouncements, 1)
|
||||
assert.Equal(t, repoclient.ParameterAnnouncement{Name: "test-param", String_: "test-value"}, *s.response.ParameterAnnouncements[0])
|
||||
require.Len(t, s.response.ParameterAnnouncements, 2)
|
||||
assert.Equal(t, repoclient.ParameterAnnouncement{Name: "dynamic-test-param", String_: "yep"}, *s.response.ParameterAnnouncements[0])
|
||||
assert.Equal(t, repoclient.ParameterAnnouncement{Name: "test-param", String_: "test-value"}, *s.response.ParameterAnnouncements[1])
|
||||
})
|
||||
t.Run("out of bounds app", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", []string{"MUST_BE_SET=yep"})
|
||||
require.NoError(t, err)
|
||||
// set a malicious app path on the metadata
|
||||
s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds"
|
||||
@@ -725,4 +748,38 @@ func TestService_GetParametersAnnouncement(t *testing.T) {
|
||||
require.ErrorContains(t, err, "illegal appPath")
|
||||
require.Nil(t, s.response)
|
||||
})
|
||||
t.Run("fails when script fails", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", []string{"WRONG_ENV_VAR=oops"})
|
||||
require.NoError(t, err)
|
||||
err = service.GetParametersAnnouncement(s)
|
||||
require.ErrorContains(t, err, "error executing dynamic parameter output command")
|
||||
require.Nil(t, s.response)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_getCommandArgsToLog(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
args []string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "no spaces",
|
||||
args: []string{"sh", "-c", "cat"},
|
||||
expected: "sh -c cat",
|
||||
},
|
||||
{
|
||||
name: "spaces",
|
||||
args: []string{"sh", "-c", `echo "hello world"`},
|
||||
expected: `sh -c "echo \"hello world\""`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tcc := tc
|
||||
t.Run(tcc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.Equal(t, tcc.expected, getCommandArgsToLog(exec.Command(tcc.args[0], tcc.args[1:]...)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,9 +5,15 @@ metadata:
spec:
version: v1.0
init:
command: [kustomize, version]
command: [sh, -c]
args:
- |
kustomize version
generate:
command: [sh, -c, "kustomize build"]
command: [sh, -c]
args:
- |
kustomize build
discover:
find:
command: [sh, -c, find . -name kustomization.yaml]
@@ -16,3 +22,12 @@ spec:
static:
- name: test-param
string: test-value
dynamic:
command: [sh, -c]
args:
- |
# Make sure env vars are making it to the plugin.
if [ -z "$MUST_BE_SET" ]; then
exit 1
fi
echo "[{\"name\": \"dynamic-test-param\", \"string\": \"$MUST_BE_SET\"}]"

@@ -1,12 +1,15 @@
package common

import (
"errors"
"os"
"path/filepath"
"strconv"
"time"

"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// Default service addresses and URLS of Argo CD internal services
@@ -156,6 +159,10 @@ const (
// Ex: "http://grafana.example.com/d/yu5UH4MMz/deployments"
// Ex: "Go to Dashboard|http://grafana.example.com/d/yu5UH4MMz/deployments"
AnnotationKeyLinkPrefix = "link.argocd.argoproj.io/"

// AnnotationKeyAppSkipReconcile tells the Application to skip the Application controller reconcile.
// Skip reconcile when the value is "true" or any other string values that can be strconv.ParseBool() to be true.
AnnotationKeyAppSkipReconcile = "argocd.argoproj.io/skip-reconcile"
)

// Environment variables for tuning and debugging Argo CD
@@ -222,13 +229,7 @@ const (
// DefaultCMPWorkDirName defines the work directory name used by the cmp-server
DefaultCMPWorkDirName = "_cmp_server"

ConfigMapPluginDeprecationWarning = "argocd-cm plugins are deprecated, and support will be removed in v2.6. Upgrade your plugin to be installed via sidecar. https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/"

ConfigMapPluginCLIDeprecationWarning = "spec.plugin.name is set, which means this Application uses a plugin installed in the " +
"argocd-cm ConfigMap. Installing plugins via that ConfigMap is deprecated in Argo CD v2.5. " +
"Starting in Argo CD v2.6, this Application will fail to sync. Contact your Argo CD admin " +
"to make sure an upgrade plan is in place. More info: " +
"https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.4-2.5/"
ConfigMapPluginDeprecationWarning = "argocd-cm plugins are deprecated, and support will be removed in v2.7. Upgrade your plugin to be installed via sidecar. https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/"
)

const (
@@ -316,3 +317,10 @@ const (
SecurityMedium = 2 // Could indicate malicious events, but has a high likelihood of being user/system error (i.e. access denied)
SecurityLow = 1 // Unexceptional entries (i.e. successful access logs)
)

// Common error messages
const TokenVerificationError = "failed to verify the token"

var TokenVerificationErr = errors.New(TokenVerificationError)

var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission denied")

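The new AnnotationKeyAppSkipReconcile constant above is documented as honoring any value that strconv.ParseBool accepts as true. Below is a minimal sketch of that check under the stated assumption; the helper name is illustrative and not part of the commit.

```go
package main

import (
	"fmt"
	"strconv"
)

// Mirrors the constant declared in the hunk above.
const annotationKeyAppSkipReconcile = "argocd.argoproj.io/skip-reconcile"

// shouldSkipReconcile is a hypothetical helper: it returns true when the
// annotation is present and parses to true via strconv.ParseBool.
func shouldSkipReconcile(annotations map[string]string) bool {
	v, ok := annotations[annotationKeyAppSkipReconcile]
	if !ok {
		return false
	}
	skip, err := strconv.ParseBool(v)
	return err == nil && skip
}

func main() {
	fmt.Println(shouldSkipReconcile(map[string]string{annotationKeyAppSkipReconcile: "true"})) // true
	fmt.Println(shouldSkipReconcile(map[string]string{annotationKeyAppSkipReconcile: "nope"})) // false
	fmt.Println(shouldSkipReconcile(nil))                                                      // false
}
```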
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/argoproj/gitops-engine/pkg/diff"
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -37,6 +38,7 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
@@ -51,6 +53,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/glob"
|
||||
"github.com/argoproj/argo-cd/v2/util/helm"
|
||||
logutils "github.com/argoproj/argo-cd/v2/util/log"
|
||||
settings_util "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
)
|
||||
@@ -335,7 +338,7 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b
|
||||
}
|
||||
|
||||
if !ctrl.canProcessApp(obj) {
|
||||
// Don't force refresh app if app belongs to a different controller shard
|
||||
// Don't force refresh app if app belongs to a different controller shard or is outside the allowed namespaces.
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -417,7 +420,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
nodes := make([]appv1.ResourceNode, 0)
|
||||
proj, err := ctrl.getAppProj(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get project: %w", err)
|
||||
}
|
||||
|
||||
orphanedNodesMap := make(map[kube.ResourceKey]appv1.ResourceNode)
|
||||
@@ -425,7 +428,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
if proj.Spec.OrphanedResources != nil {
|
||||
orphanedNodesMap, err = ctrl.stateCache.GetNamespaceTopLevelResources(a.Spec.Destination.Server, a.Spec.Destination.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get namespace top-level resources: %w", err)
|
||||
}
|
||||
warnOrphaned = proj.Spec.OrphanedResources.IsWarn()
|
||||
}
|
||||
@@ -435,12 +438,12 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
var live = &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(managedResource.LiveState), &live)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to unmarshal live state of managed resources: %w", err)
|
||||
}
|
||||
var target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(managedResource.TargetState), &target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to unmarshal target state of managed resources: %w", err)
|
||||
}
|
||||
|
||||
if live == nil {
|
||||
@@ -456,7 +459,11 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
} else {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) bool {
|
||||
permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
|
||||
return ctrl.db.GetProjectClusters(context.TODO(), project)
|
||||
clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get project clusters: %w", err)
|
||||
}
|
||||
return clusters, nil
|
||||
})
|
||||
if !permitted {
|
||||
return false
|
||||
@@ -465,7 +472,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to iterate resource hierarchy: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -514,7 +521,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
|
||||
hosts, err := ctrl.getAppHosts(a, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get app hosts: %w", err)
|
||||
}
|
||||
return &appv1.ApplicationTree{Nodes: nodes, OrphanedNodes: orphanedNodes, Hosts: hosts}, nil
|
||||
}
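The hunks above replace bare `return nil, err` with `fmt.Errorf("...: %w", err)`. A small standalone sketch of why the `%w` verb is used: the wrapped error gains context for logs while the original cause stays matchable with `errors.Is`/`errors.As` (the names below are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// errProjectNotFound stands in for whatever error the project lookup returns.
var errProjectNotFound = errors.New("project not found")

func getAppProj(name string) error {
	return errProjectNotFound // simulate a lookup failure
}

func getResourceTree(project string) error {
	if err := getAppProj(project); err != nil {
		// %w adds context for humans while keeping the cause matchable.
		return fmt.Errorf("failed to get project: %w", err)
	}
	return nil
}

func main() {
	err := getResourceTree("default")
	fmt.Println(err)                                // failed to get project: project not found
	fmt.Println(errors.Is(err, errProjectNotFound)) // true
}
```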
@@ -833,7 +840,7 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b
Message: err.Error(),
})
message := fmt.Sprintf("Unable to delete application resources: %v", err.Error())
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Type: v1.EventTypeWarning}, message)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Type: v1.EventTypeWarning}, message, "")
}
}
return
@@ -937,7 +944,9 @@ func (ctrl *ApplicationController) removeProjectFinalizer(proj *appv1.AppProject

// shouldBeDeleted returns whether a given resource obj should be deleted on cascade delete of application app
func (ctrl *ApplicationController) shouldBeDeleted(app *appv1.Application, obj *unstructured.Unstructured) bool {
return !kube.IsCRD(obj) && !isSelfReferencedApp(app, kube.GetObjectRef(obj))
return !kube.IsCRD(obj) && !isSelfReferencedApp(app, kube.GetObjectRef(obj)) &&
!resourceutil.HasAnnotationOption(obj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDisableDeletion) &&
!resourceutil.HasAnnotationOption(obj, helm.ResourcePolicyAnnotation, helm.ResourcePolicyKeep)
}

func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Application, proj *appv1.AppProject, projectClusters func(project string) ([]*appv1.Cluster, error)) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
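The new `shouldBeDeleted` logic above keeps resources that opt out of deletion via the `Delete=false` sync option or the Helm `keep` resource policy. A rough, self-contained sketch of the annotation check (the `hasAnnotationOption` helper below is a simplified stand-in for the gitops-engine `resourceutil.HasAnnotationOption` used in the diff):

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// hasAnnotationOption is a simplified stand-in for the gitops-engine helper:
// it reports whether the comma-separated annotation value contains val.
func hasAnnotationOption(obj *unstructured.Unstructured, key, val string) bool {
	for _, opt := range strings.Split(obj.GetAnnotations()[key], ",") {
		if strings.TrimSpace(opt) == val {
			return true
		}
	}
	return false
}

func main() {
	cm := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata": map[string]interface{}{
			"name": "example",
			"annotations": map[string]interface{}{
				"argocd.argoproj.io/sync-options": "Prune=false,Delete=false",
			},
		},
	}}
	// A resource carrying Delete=false would now survive a cascade delete.
	fmt.Println(hasAnnotationOption(cm, "argocd.argoproj.io/sync-options", "Delete=false")) // true
}
```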
@@ -1293,7 +1302,7 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
eventInfo.Type = v1.EventTypeWarning
messages = append(messages, "failed:", state.Message)
}
ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "))
ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "), "")
ctrl.metricsServer.IncSync(app, state)
}
return nil
@@ -1356,7 +1365,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
} else {
var tree *appv1.ApplicationTree
if tree, err = ctrl.getResourceTree(app, managedResources); err == nil {
app.Status.Summary = tree.GetSummary()
app.Status.Summary = tree.GetSummary(app)
if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), tree); err != nil {
logCtx.Errorf("Failed to cache resources tree: %v", err)
return
@@ -1430,7 +1439,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
if err != nil {
logCtx.Errorf("Failed to cache app resources: %v", err)
} else {
app.Status.Summary = tree.GetSummary()
app.Status.Summary = tree.GetSummary(app)
}

if project.Spec.SyncWindows.Matches(app).CanSync(false) {
@@ -1469,6 +1478,13 @@ func resourceStatusKey(res appv1.ResourceStatus) string {
return strings.Join([]string{res.Group, res.Kind, res.Namespace, res.Name}, "/")
}

func currentSourceEqualsSyncedSource(app *appv1.Application) bool {
if app.Spec.HasMultipleSources() {
return app.Spec.Sources.Equals(app.Status.Sync.ComparedTo.Sources)
}
return app.Spec.Source.Equals(&app.Status.Sync.ComparedTo.Source)
}

// needRefreshAppStatus answers if application status needs to be refreshed.
// Returns true if application never been compared, has changed or comparison result has expired.
// Additionally returns whether full refresh was requested or not.
@@ -1487,14 +1503,12 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
refreshType = requestedType
reason = fmt.Sprintf("%s refresh requested", refreshType)
} else {
if app.Spec.HasMultipleSources() {
if (len(app.Spec.Sources) != len(app.Status.Sync.ComparedTo.Sources)) || !reflect.DeepEqual(app.Spec.Sources, app.Status.Sync.ComparedTo.Sources) {
reason = "atleast one of the spec.sources differs"
compareWith = CompareWithLatestForceResolve
}
} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) {
if !currentSourceEqualsSyncedSource(app) {
reason = "spec.source differs"
compareWith = CompareWithLatestForceResolve
if app.Spec.HasMultipleSources() {
reason = "at least one of the spec.sources differs"
}
} else if hardExpired || softExpired {
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
@@ -1571,11 +1585,11 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
logCtx := log.WithFields(log.Fields{"application": orig.QualifiedName()})
if orig.Status.Sync.Status != newStatus.Sync.Status {
message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status)
ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message, "")
}
if orig.Status.Health.Status != newStatus.Health.Status {
message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status)
ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message, "")
}
var newAnnotations map[string]string
if orig.GetAnnotations() != nil {
@@ -1710,7 +1724,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}
}
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "")
logCtx.Info(message)
return nil
}
@@ -1773,6 +1787,27 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
if !ok {
return false
}

// Only process given app if it exists in a watched namespace, or in the
// control plane's namespace.
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
return false
}

if annotations := app.GetAnnotations(); annotations != nil {
if skipVal, ok := annotations[common.AnnotationKeyAppSkipReconcile]; ok {
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
if skipReconcile, err := strconv.ParseBool(skipVal); err == nil {
if skipReconcile {
logCtx.Debugf("Skipping Application reconcile based on annotation %s", common.AnnotationKeyAppSkipReconcile)
return false
}
} else {
logCtx.Debugf("Unable to determine if Application should skip reconcile based on annotation %s: %v", common.AnnotationKeyAppSkipReconcile, err)
}
}
}

if ctrl.clusterFilter != nil {
cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
if err != nil {
@@ -1781,12 +1816,6 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
return ctrl.clusterFilter(cluster)
}

// Only process given app if it exists in a watched namespace, or in the
// control plane's namespace.
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
return false
}

return true
}
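The annotation gate added to `canProcessApp` above skips reconciliation only when the skip-reconcile annotation parses as `true`; unparseable values are logged and the app is still processed. A minimal standalone sketch of that decision (the annotation key literal below is an assumption for illustration; the real value comes from `common.AnnotationKeyAppSkipReconcile`):

```go
package main

import (
	"fmt"
	"strconv"
)

// skipReconcileAnnotation mirrors common.AnnotationKeyAppSkipReconcile; the
// literal value here is assumed for illustration only.
const skipReconcileAnnotation = "argocd.argoproj.io/skip-reconcile"

// shouldSkipReconcile reproduces the annotation gate in isolation: only a
// value that strconv.ParseBool accepts as true skips reconciliation; missing
// or unparseable values fall through and the app is still processed.
func shouldSkipReconcile(annotations map[string]string) bool {
	val, ok := annotations[skipReconcileAnnotation]
	if !ok {
		return false
	}
	skip, err := strconv.ParseBool(val)
	return err == nil && skip
}

func main() {
	fmt.Println(shouldSkipReconcile(map[string]string{skipReconcileAnnotation: "true"}))          // true
	fmt.Println(shouldSkipReconcile(map[string]string{skipReconcileAnnotation: "invalid-value"})) // false
	fmt.Println(shouldSkipReconcile(nil))                                                         // false
}
```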

@@ -209,6 +209,55 @@ status:
repoURL: https://github.com/argoproj/argocd-example-apps.git
`

var fakeMultiSourceApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
uid: "123"
name: my-app
namespace: ` + test.FakeArgoCDNamespace + `
spec:
destination:
namespace: ` + test.FakeDestNamespace + `
server: https://localhost:6443
project: default
sources:
- path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
- path: some/other/path
repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
syncPolicy:
automated: {}
status:
operationState:
finishedAt: 2018-09-21T23:50:29Z
message: successfully synced
operation:
sync:
revisions:
- HEAD
- HEAD
phase: Succeeded
startedAt: 2018-09-21T23:50:25Z
syncResult:
resources:
- kind: RoleBinding
message: |-
rolebinding.rbac.authorization.k8s.io/always-outofsync reconciled
rolebinding.rbac.authorization.k8s.io/always-outofsync configured
name: always-outofsync
namespace: default
status: Synced
revisions:
- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
sources:
- path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
- path: some/other/path
repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
`

var fakeAppWithDestName = `
apiVersion: argoproj.io/v1alpha1
kind: Application
@@ -263,6 +312,10 @@ func newFakeApp() *argoappv1.Application {
return createFakeApp(fakeApp)
}

func newFakeMultiSourceApp() *argoappv1.Application {
return createFakeApp(fakeMultiSourceApp)
}

func newFakeAppWithDestMismatch() *argoappv1.Application {
return createFakeApp(fakeAppWithDestMismatch)
}
@@ -857,101 +910,133 @@ func TestSetOperationStateOnDeletedApp(t *testing.T) {
}

func TestNeedRefreshAppStatus(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})

app := newFakeApp()
now := metav1.Now()
app.Status.ReconciledAt = &now
app.Status.Sync = argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Source: app.Spec.GetSource(),
Destination: app.Spec.Destination,
testCases := []struct {
name string
app *argoappv1.Application
}{
{
name: "single-source app",
app: newFakeApp(),
},
{
name: "multi-source app",
app: newFakeMultiSourceApp(),
},
}

// no need to refresh just reconciled application
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
app := tc.app
now := metav1.Now()
app.Status.ReconciledAt = &now

// refresh app using the 'deepest' requested comparison level
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
app.Status.Sync = argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Destination: app.Spec.Destination,
},
}

needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithRecent, compareWith)
if app.Spec.HasMultipleSources() {
app.Status.Sync.ComparedTo.Sources = app.Spec.Sources
} else {
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
}

// refresh application which status is not reconciled using latest commit
app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown}
// no need to refresh just reconciled application
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)

needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
// refresh app using the 'deepest' requested comparison level
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)

{
// refresh app using the 'latest' level if comparison expired
app := app.DeepCopy()
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
}
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithRecent, compareWith)

{
// refresh app using the 'latest' level if comparison expired for hard refresh
app := app.DeepCopy()
app.Status.Sync = argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Source: app.Spec.GetSource(),
Destination: app.Spec.Destination,
},
}
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
}
// refresh application which status is not reconciled using latest commit
app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown}

{
app := app.DeepCopy()
// execute hard refresh if app has refresh annotation
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
app.Annotations = map[string]string{
v1alpha1.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)

{
app := app.DeepCopy()
// ensure that CompareWithLatest level is used if application source has changed
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
// sample app source change
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{{
Name: "foo",
Value: "bar",
}},
}
t.Run("refresh app using the 'latest' level if comparison expired", func(t *testing.T) {
app := app.DeepCopy()
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
})

needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
t.Run("refresh app using the 'latest' level if comparison expired for hard refresh", func(t *testing.T) {
app := app.DeepCopy()
app.Status.Sync = argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Destination: app.Spec.Destination,
},
}
if app.Spec.HasMultipleSources() {
app.Status.Sync.ComparedTo.Sources = app.Spec.Sources
} else {
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
}
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatest, compareWith)
})

t.Run("execute hard refresh if app has refresh annotation", func(t *testing.T) {
app := app.DeepCopy()
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
app.Status.ReconciledAt = &reconciledAt
app.Annotations = map[string]string{
v1alpha1.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
}
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
})

t.Run("ensure that CompareWithLatest level is used if application source has changed", func(t *testing.T) {
app := app.DeepCopy()
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
// sample app source change
if app.Spec.HasMultipleSources() {
app.Spec.Sources[0].Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{{
Name: "foo",
Value: "bar",
}},
}
} else {
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
Parameters: []argoappv1.HelmParameter{{
Name: "foo",
Value: "bar",
}},
}
}

needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
})
})
}
}

@@ -1373,3 +1458,80 @@ func TestToAppKey(t *testing.T) {
})
}
}

func Test_canProcessApp(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
ctrl.applicationNamespaces = []string{"good"}
t.Run("without cluster filter, good namespace", func(t *testing.T) {
app.Namespace = "good"
canProcess := ctrl.canProcessApp(app)
assert.True(t, canProcess)
})
t.Run("without cluster filter, bad namespace", func(t *testing.T) {
app.Namespace = "bad"
canProcess := ctrl.canProcessApp(app)
assert.False(t, canProcess)
})
t.Run("with cluster filter, good namespace", func(t *testing.T) {
app.Namespace = "good"
ctrl.clusterFilter = func(_ *argoappv1.Cluster) bool { return true }
canProcess := ctrl.canProcessApp(app)
assert.True(t, canProcess)
})
t.Run("with cluster filter, bad namespace", func(t *testing.T) {
app.Namespace = "bad"
ctrl.clusterFilter = func(_ *argoappv1.Cluster) bool { return true }
canProcess := ctrl.canProcessApp(app)
assert.False(t, canProcess)
})
}

func Test_canProcessAppSkipReconcileAnnotation(t *testing.T) {
appSkipReconcileInvalid := newFakeApp()
appSkipReconcileInvalid.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "invalid-value"}
appSkipReconcileFalse := newFakeApp()
appSkipReconcileFalse.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "false"}
appSkipReconcileTrue := newFakeApp()
appSkipReconcileTrue.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "true"}
ctrl := newFakeController(&fakeData{})
tests := []struct {
name string
input interface{}
expected bool
}{
{"No skip reconcile annotation", newFakeApp(), true},
{"Contains skip reconcile annotation ", appSkipReconcileInvalid, true},
{"Contains skip reconcile annotation value false", appSkipReconcileFalse, true},
{"Contains skip reconcile annotation value true", appSkipReconcileTrue, false},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expected, ctrl.canProcessApp(tt.input))
})
}
}

func Test_syncDeleteOption(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
cm := newFakeCM()
t.Run("without delete option object is deleted", func(t *testing.T) {
cmObj := kube.MustToUnstructured(&cm)
delete := ctrl.shouldBeDeleted(app, cmObj)
assert.True(t, delete)
})
t.Run("with delete set to false object is retained", func(t *testing.T) {
cmObj := kube.MustToUnstructured(&cm)
cmObj.SetAnnotations(map[string]string{"argocd.argoproj.io/sync-options": "Delete=false"})
delete := ctrl.shouldBeDeleted(app, cmObj)
assert.False(t, delete)
})
t.Run("with delete set to false object is retained", func(t *testing.T) {
cmObj := kube.MustToUnstructured(&cm)
cmObj.SetAnnotations(map[string]string{"helm.sh/resource-policy": "keep"})
delete := ctrl.shouldBeDeleted(app, cmObj)
assert.False(t, delete)
})
}

controller/cache/cache.go vendored (21 changes)
@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"

"github.com/argoproj/argo-cd/v2/controller/metrics"
@@ -220,10 +221,10 @@ func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
gv = schema.GroupVersion{}
}
parentRefs := make([]appv1.ResourceRef, len(r.OwnerRefs))
for _, ownerRef := range r.OwnerRefs {
for i, ownerRef := range r.OwnerRefs {
ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
ownerKey := kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, r.Ref.Namespace, ownerRef.Name)
parentRefs[0] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: r.Ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)}
parentRefs[i] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: r.Ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)}
}
var resHealth *appv1.HealthStatus
resourceInfo := resInfo(r)
@@ -394,6 +395,20 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
return nil, fmt.Errorf("error getting custom label: %w", err)
}

clusterCacheConfig := cluster.RESTConfig()
// Controller dynamically fetches all resource types available on the cluster
// using a discovery API that may contain deprecated APIs.
// This causes log flooding when managing a large number of clusters.
// https://github.com/argoproj/argo-cd/issues/11973
// However, we can safely suppress deprecation warnings
// because we do not rely on resources with a particular API group or version.
// https://kubernetes.io/blog/2020/09/03/warnings/#customize-client-handling
//
// Completely suppress warning logs only for log levels that are less than Debug.
if log.GetLevel() < log.DebugLevel {
clusterCacheConfig.WarningHandler = rest.NoWarnings{}
}

clusterCacheOpts := []clustercache.UpdateSettingsFunc{
clustercache.SetListSemaphore(semaphore.NewWeighted(clusterCacheListSemaphoreSize)),
clustercache.SetListPageSize(clusterCacheListPageSize),
@@ -425,7 +440,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
clustercache.SetRetryOptions(clusterCacheAttemptLimit, clusterCacheRetryUseBackoff, isRetryableError),
}

clusterCache = clustercache.NewClusterCache(cluster.RESTConfig(), clusterCacheOpts...)
clusterCache = clustercache.NewClusterCache(clusterCacheConfig, clusterCacheOpts...)

_ = clusterCache.OnResourceUpdated(func(newRes *clustercache.Resource, oldRes *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) {
toNotify := make(map[string]bool)
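The comment block above explains why deprecation warnings from cluster discovery are dropped below the debug log level. A minimal sketch of the client-go mechanism involved, `rest.Config.WarningHandler` with `rest.NoWarnings{}` (the config construction here is a placeholder, not Argo CD's):

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
	"k8s.io/client-go/rest"
)

func main() {
	// Placeholder config; in the controller this comes from cluster.RESTConfig().
	restCfg := &rest.Config{Host: "https://example.invalid:6443"}

	// Same idea as the hunk above: silence API deprecation warnings entirely
	// unless the process runs at debug (or more verbose) log level.
	if log.GetLevel() < log.DebugLevel {
		restCfg.WarningHandler = rest.NoWarnings{}
	}

	fmt.Printf("warning handler set: %v\n", restCfg.WarningHandler != nil)
}
```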

controller/cache/cache_test.go vendored (55 changes)
@@ -6,10 +6,11 @@ import (
"net/url"
"testing"

apierr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/cache/mocks"
@@ -153,3 +154,51 @@ func TestIsRetryableError(t *testing.T) {
assert.True(t, isRetryableError(connectionReset))
})
}

func Test_asResourceNode_owner_refs(t *testing.T) {
resNode := asResourceNode(&cache.Resource{
ResourceVersion: "",
Ref: v1.ObjectReference{
APIVersion: "v1",
},
OwnerRefs: []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "ConfigMap",
Name: "cm-1",
},
{
APIVersion: "v1",
Kind: "ConfigMap",
Name: "cm-2",
},
},
CreationTimestamp: nil,
Info: nil,
Resource: nil,
})
expected := appv1.ResourceNode{
ResourceRef: appv1.ResourceRef{
Version: "v1",
},
ParentRefs: []appv1.ResourceRef{
{
Group: "",
Kind: "ConfigMap",
Name: "cm-1",
},
{
Group: "",
Kind: "ConfigMap",
Name: "cm-2",
},
},
Info: nil,
NetworkingInfo: nil,
ResourceVersion: "",
Images: nil,
Health: nil,
CreatedAt: nil,
}
assert.Equal(t, expected, resNode)
}

@@ -2,13 +2,13 @@ package controller

import (
"context"
"time"
"fmt"
"github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"time"

"github.com/argoproj/argo-cd/v2/controller/metrics"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

Some files were not shown because too many files have changed in this diff.