Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 09:38:49 +01:00
Compare commits
388 Commits
@@ -7,6 +7,7 @@ ignore:
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
- "test/.*"
coverage:
  status:
    # we've found this not to be useful
.github/workflows/ci-build.yaml (vendored): 57 lines changed
@@ -12,7 +12,11 @@ on:
|
||||
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
GOLANG_VERSION: '1.17.6'
|
||||
GOLANG_VERSION: '1.18'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-go:
|
||||
@@ -60,17 +64,24 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@v1
|
||||
with:
|
||||
version: v1.38.0
|
||||
args: --timeout 10m --exclude SA5011
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: v1.45.2
|
||||
args: --timeout 10m --exclude SA5011 --verbose
|
||||
|
||||
test-go:
|
||||
name: Run unit tests for Go packages
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build-go
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
steps:
|
||||
- name: Create checkout directory
|
||||
run: mkdir -p ~/go/src/github.com/argoproj
|
||||
@@ -131,6 +142,9 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build-go
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
steps:
|
||||
- name: Create checkout directory
|
||||
run: mkdir -p ~/go/src/github.com/argoproj
|
||||
@@ -212,9 +226,6 @@ jobs:
|
||||
make install-codegen-tools-local
|
||||
make install-go-tools-local
|
||||
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
|
||||
- name: Initialize local Helm
|
||||
run: |
|
||||
helm2 init --client-only
|
||||
- name: Run codegen
|
||||
run: |
|
||||
set -x
|
||||
@@ -331,7 +342,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
k3s-version: [v1.21.2, v1.20.2, v1.19.2, v1.18.9, v1.17.11]
|
||||
k3s-version: [v1.23.3, v1.22.6, v1.21.2]
|
||||
needs:
|
||||
- build-go
|
||||
env:
|
||||
@@ -344,6 +355,8 @@ jobs:
|
||||
ARGOCD_IN_CI: "true"
|
||||
ARGOCD_E2E_APISERVER_PORT: "8088"
|
||||
ARGOCD_SERVER: "127.0.0.1:8088"
|
||||
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
@@ -376,10 +389,13 @@ jobs:
|
||||
- name: Add /usr/local/bin to PATH
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Add ./dist to PATH
|
||||
run: |
|
||||
echo "$(pwd)/dist" >> $GITHUB_PATH
|
||||
- name: Download Go dependencies
|
||||
run: |
|
||||
go mod download
|
||||
go get github.com/mattn/goreman
|
||||
go install github.com/mattn/goreman@latest
|
||||
- name: Install all tools required for building & testing
|
||||
run: |
|
||||
make install-test-tools-local
|
||||
@@ -391,7 +407,7 @@ jobs:
|
||||
run: |
|
||||
docker pull quay.io/dexidp/dex:v2.25.0
|
||||
docker pull argoproj/argo-cd-ci-builder:v1.0.0
|
||||
docker pull redis:6.2.6-alpine
|
||||
docker pull redis:7.0.0-alpine
|
||||
- name: Create target directory for binaries in the build-process
|
||||
run: |
|
||||
mkdir -p dist
|
||||
@@ -408,7 +424,7 @@ jobs:
|
||||
count=1
|
||||
until curl -f http://127.0.0.1:8088/healthz; do
|
||||
sleep 10;
|
||||
if test $count -ge 60; then
|
||||
if test $count -ge 180; then
|
||||
echo "Timeout"
|
||||
exit 1
|
||||
fi
|
||||
@@ -424,20 +440,3 @@ jobs:
|
||||
name: e2e-server-k8s${{ matrix.k3s-version }}.log
|
||||
path: /tmp/e2e-server.log
|
||||
if: ${{ failure() }}
|
||||
|
||||
lint-docs:
|
||||
name: Lint docs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: '3.x'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
pip install -r docs/requirements.txt
|
||||
- name: Lint docs
|
||||
run: |
|
||||
make lint-docs
|
||||
|
||||
.github/workflows/codeql.yml (vendored): 16 lines changed
@@ -2,10 +2,17 @@ name: "Code scanning - action"
|
||||
|
||||
on:
|
||||
push:
|
||||
# Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot
|
||||
branches-ignore:
|
||||
- 'dependabot/**'
|
||||
pull_request:
|
||||
schedule:
|
||||
- cron: '0 19 * * 0'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
CodeQL-Build:
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
@@ -16,15 +23,6 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We must fetch at least the immediate parents so that if this is
|
||||
# a pull request then we can checkout the head.
|
||||
fetch-depth: 2
|
||||
|
||||
# If this run was triggered by a pull request event, then checkout
|
||||
# the head of the pull request instead of the merge commit.
|
||||
- run: git checkout HEAD^2
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
|
||||
.github/workflows/image.yaml (vendored): 56 lines changed
@@ -10,7 +10,11 @@ on:
|
||||
types: [ labeled, unlabeled, opened, synchronize, reopened ]
|
||||
|
||||
env:
|
||||
GOLANG_VERSION: '1.17.6'
|
||||
GOLANG_VERSION: '1.18'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
@@ -45,18 +49,66 @@ jobs:
|
||||
# build
|
||||
- uses: docker/setup-qemu-action@v1
|
||||
- uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Setup cache for argocd-ui docker layer
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-single-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-single-buildx
|
||||
|
||||
- name: Build cache for argocd-ui stage
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./src/github.com/argoproj/argo-cd
|
||||
target: argocd-ui
|
||||
push: false
|
||||
cache-from: type=local,src=/tmp/.buildx-cache
|
||||
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
|
||||
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
|
||||
|
||||
- name: Run non-container Snyk scans
|
||||
if: github.event_name == 'push'
|
||||
working-directory: ./src/github.com/argoproj/argo-cd
|
||||
env:
|
||||
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
|
||||
run: |
|
||||
npm install -g snyk
|
||||
|
||||
# Run with high threshold to fail build.
|
||||
snyk test --org=argoproj --all-projects --exclude=docs,site --severity-threshold=high --policy-path=.snyk
|
||||
snyk iac test manifests/install.yaml --org=argoproj --severity-threshold=high --policy-path=.snyk
|
||||
|
||||
- run: |
|
||||
IMAGE_PLATFORMS=linux/amd64
|
||||
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
|
||||
then
|
||||
IMAGE_PLATFORMS=linux/amd64,linux/arm64
|
||||
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
fi
|
||||
echo "Building image for platforms: $IMAGE_PLATFORMS"
|
||||
docker buildx build --platform $IMAGE_PLATFORMS --push="${{ github.event_name == 'push' }}" \
|
||||
--cache-from "type=local,src=/tmp/.buildx-cache" \
|
||||
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
|
||||
-t quay.io/argoproj/argocd:latest .
|
||||
working-directory: ./src/github.com/argoproj/argo-cd
|
||||
|
||||
- name: Run container Snyk scan
|
||||
if: github.event_name == 'push'
|
||||
working-directory: ./src/github.com/argoproj/argo-cd
|
||||
env:
|
||||
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
|
||||
run: |
|
||||
snyk container test quay.io/argoproj/argocd:latest --org=argoproj --file=Dockerfile --severity-threshold=high
|
||||
|
||||
# Temp fix
|
||||
# https://github.com/docker/build-push-action/issues/252
|
||||
# https://github.com/moby/buildkit/issues/1896
|
||||
- name: Clean up build cache
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache
|
||||
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
|
||||
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
|
||||
|
||||
# deploy
|
||||
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
|
||||
|
||||
.github/workflows/release.yaml (vendored): 119 lines changed
@@ -2,17 +2,17 @@ name: Create ArgoCD release
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'release-v*'
|
||||
- '!release-v1.5*'
|
||||
- '!release-v1.4*'
|
||||
- '!release-v1.3*'
|
||||
- '!release-v1.2*'
|
||||
- '!release-v1.1*'
|
||||
- '!release-v1.0*'
|
||||
- '!release-v0*'
|
||||
- "release-v*"
|
||||
- "!release-v1.5*"
|
||||
- "!release-v1.4*"
|
||||
- "!release-v1.3*"
|
||||
- "!release-v1.2*"
|
||||
- "!release-v1.1*"
|
||||
- "!release-v1.0*"
|
||||
- "!release-v0*"
|
||||
|
||||
env:
|
||||
GOLANG_VERSION: '1.17.6'
|
||||
GOLANG_VERSION: '1.18'
|
||||
|
||||
jobs:
|
||||
prepare-release:
|
||||
@@ -95,7 +95,7 @@ jobs:
|
||||
echo "=========== BEGIN COMMIT MESSAGE ============="
|
||||
git show ${SOURCE_TAG}
|
||||
echo "============ END COMMIT MESSAGE =============="
|
||||
|
||||
|
||||
# Quite dirty hack to get the release notes from the annotated tag
|
||||
# into a temporary file.
|
||||
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)
|
||||
@@ -142,7 +142,7 @@ jobs:
|
||||
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@v1
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
|
||||
@@ -172,7 +172,6 @@ jobs:
|
||||
run: |
|
||||
set -ue
|
||||
make install-codegen-tools-local
|
||||
helm2 init --client-only
|
||||
make manifests-local VERSION=${TARGET_VERSION}
|
||||
git diff
|
||||
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
|
||||
@@ -196,14 +195,16 @@ jobs:
|
||||
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
|
||||
- uses: docker/setup-qemu-action@v1
|
||||
- uses: docker/setup-buildx-action@v1
|
||||
- name: Build and push Docker image for release
|
||||
run: |
|
||||
set -ue
|
||||
git clean -fd
|
||||
mkdir -p dist/
|
||||
docker buildx build --platform linux/amd64,linux/arm64 --push -t ${IMAGE_NAMESPACE}/argocd:${TARGET_VERSION} -t argoproj/argocd:${TARGET_VERSION} .
|
||||
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
|
||||
make release-cli
|
||||
make checksums
|
||||
chmod +x ./dist/argocd-linux-amd64
|
||||
./dist/argocd-linux-amd64 version --client
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
@@ -211,7 +212,7 @@ jobs:
|
||||
- name: Read release notes file
|
||||
id: release-notes
|
||||
uses: juliangruber/read-file-action@v1
|
||||
with:
|
||||
with:
|
||||
path: ${{ env.RELEASE_NOTES }}
|
||||
|
||||
- name: Push changes to release branch
|
||||
@@ -220,7 +221,7 @@ jobs:
|
||||
git push origin ${TARGET_BRANCH}
|
||||
git push origin ${RELEASE_TAG}
|
||||
|
||||
- name: Create GitHub release
|
||||
- name: Dry run GitHub release
|
||||
uses: actions/create-release@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -231,60 +232,52 @@ jobs:
|
||||
draft: ${{ env.DRAFT_RELEASE }}
|
||||
prerelease: ${{ env.PRE_RELEASE }}
|
||||
body: ${{ steps.release-notes.outputs.content }}
|
||||
if: ${{ env.DRY_RUN == 'true' }}
|
||||
|
||||
- name: Upload argocd-linux-amd64 binary to release assets
|
||||
uses: actions/upload-release-asset@v1
|
||||
- name: Generate SBOM (spdx)
|
||||
id: spdx-builder
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./dist/argocd-linux-amd64
|
||||
asset_name: argocd-linux-amd64
|
||||
asset_content_type: application/octet-stream
|
||||
# defines the spdx/spdx-sbom-generator version to use.
|
||||
SPDX_GEN_VERSION: v0.0.13
|
||||
# defines the sigs.k8s.io/bom version to use.
|
||||
SIGS_BOM_VERSION: v0.2.1
|
||||
# comma delimited list of project relative folders to inspect for package
|
||||
# managers (gomod, yarn, npm).
|
||||
PROJECT_FOLDERS: ".,./ui"
|
||||
# full qualified name of the docker image to be inspected
|
||||
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
|
||||
run: |
|
||||
yarn install --cwd ./ui
|
||||
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
|
||||
go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION
|
||||
|
||||
# Generate SPDX for project dependencies analyzing package managers
|
||||
for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g")
|
||||
do
|
||||
generator -p $folder -o /tmp
|
||||
done
|
||||
|
||||
# Generate SPDX for binaries analyzing the docker image
|
||||
if [[ ! -z $DOCKER_IMAGE ]]; then
|
||||
bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE
|
||||
fi
|
||||
|
||||
cd /tmp && tar -zcf sbom.tar.gz *.spdx
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Upload argocd-linux-arm64 binary to release assets
|
||||
uses: actions/upload-release-asset@v1
|
||||
- name: Create GitHub release
|
||||
uses: softprops/action-gh-release@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./dist/argocd-linux-arm64
|
||||
asset_name: argocd-linux-arm64
|
||||
asset_content_type: application/octet-stream
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Upload argocd-darwin-amd64 binary to release assets
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./dist/argocd-darwin-amd64
|
||||
asset_name: argocd-darwin-amd64
|
||||
asset_content_type: application/octet-stream
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Upload argocd-darwin-arm64 binary to release assets
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./dist/argocd-darwin-arm64
|
||||
asset_name: argocd-darwin-arm64
|
||||
asset_content_type: application/octet-stream
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Upload argocd-windows-amd64 binary to release assets
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: ./dist/argocd-windows-amd64.exe
|
||||
asset_name: argocd-windows-amd64.exe
|
||||
asset_content_type: application/octet-stream
|
||||
name: ${{ env.RELEASE_TAG }}
|
||||
tag_name: ${{ env.RELEASE_TAG }}
|
||||
draft: ${{ env.DRAFT_RELEASE }}
|
||||
prerelease: ${{ env.PRE_RELEASE }}
|
||||
body: ${{ steps.release-notes.outputs.content }}
|
||||
files: |
|
||||
dist/argocd-*
|
||||
/tmp/sbom.tar.gz
|
||||
if: ${{ env.DRY_RUN != 'true' }}
|
||||
|
||||
- name: Update homebrew formula
|
||||
|
||||
.gitignore (vendored): 3 lines changed
@@ -16,9 +16,10 @@ test-results
.scratch
node_modules/
.kube/
./test/cmp/*.sock

# ignore built binaries
cmd/argocd/argocd
cmd/argocd-application-controller/argocd-application-controller
cmd/argocd-repo-server/argocd-repo-server
cmd/argocd-server/argocd-server
cmd/argocd-server/argocd-server
.gitpod.Dockerfile (vendored): 2 lines changed
@@ -9,7 +9,7 @@ RUN curl -L https://go.kubebuilder.io/dl/2.3.1/$(go env GOOS)/$(go env GOARCH) |
|
||||
tar -xz -C /tmp/ && mv /tmp/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH) /usr/local/kubebuilder
|
||||
|
||||
RUN apt-get install redis-server -y
|
||||
RUN go get github.com/mattn/goreman
|
||||
RUN go install github.com/mattn/goreman@latest
|
||||
|
||||
USER gitpod
|
||||
|
||||
|
||||
@@ -2,5 +2,5 @@ image:
|
||||
file: .gitpod.Dockerfile
|
||||
|
||||
tasks:
|
||||
- init: make mod-download-local dep-ui-local && GO111MODULE=off go get github.com/mattn/goreman
|
||||
- init: make mod-download-local dep-ui-local && GO111MODULE=off go install github.com/mattn/goreman@latest
|
||||
command: make start-test-k8s
|
||||
@@ -1,22 +0,0 @@
|
||||
run:
|
||||
timeout: 2m
|
||||
skip-files:
|
||||
- ".*\\.pb\\.go"
|
||||
skip-dirs:
|
||||
- pkg/client/
|
||||
- vendor/
|
||||
linters:
|
||||
enable:
|
||||
- vet
|
||||
- deadcode
|
||||
- goimports
|
||||
- varcheck
|
||||
- structcheck
|
||||
- ineffassign
|
||||
- unconvert
|
||||
- unparam
|
||||
linters-settings:
|
||||
goimports:
|
||||
local-prefixes: github.com/argoproj/argo-cd
|
||||
service:
|
||||
golangci-lint-version: 1.21.0
|
||||
.snyk (new file): 22 lines added
@@ -0,0 +1,22 @@
|
||||
# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
|
||||
version: v1.22.1
|
||||
# ignores vulnerabilities until expiry date; change duration by modifying expiry date
|
||||
ignore:
|
||||
SNYK-JS-ANSIREGEX-1583908:
|
||||
- '*':
|
||||
reason: >-
|
||||
Code is only run client-side in the swagger-ui endpoint. No risk of
|
||||
server-side DoS.
|
||||
SNYK-CC-K8S-44:
|
||||
- 'manifests/core-install.yaml > *':
|
||||
reason: >-
|
||||
Argo CD needs wide permissions to manage resources.
|
||||
- 'manifests/install.yaml > *':
|
||||
reason: >-
|
||||
Argo CD needs wide permissions to manage resources.
|
||||
SNYK-JS-MOMENT-2440688:
|
||||
- '*':
|
||||
reason: >-
|
||||
Code is only run client-side. No risk of directory traversal.
|
||||
patch: {}
|
||||
|
||||
CHANGELOG.md: 202 lines changed
@@ -1,5 +1,207 @@
# Changelog

## v2.4.0 (Unreleased)

### Web Terminal In Argo CD UI

This feature enables engineers to start a shell in a running application container without leaving the web interface. Just find the required Kubernetes Pod on the Application Details page, click it, and select the Terminal tab. The shell starts automatically, lets you execute the required commands, and helps you troubleshoot the application state.

### Access Control For Pod Logs & Web Terminal

Argo CD is used to manage the critical infrastructure of multiple organizations, which makes security the top priority of the project. We've listened to your feedback and introduced additional access control settings that govern access to Kubernetes Pod logs and the new Web Terminal feature.

#### Known UI Issue for Pod Logs Access

Currently, when a user without an explicit allow-get-logs policy opens the "LOGS" tab in the pod view, a red "unable to load data: Internal error" message appears at the bottom of the screen and "Failed to load data, please try again" is displayed.
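To make the new access controls concrete, here is a minimal sketch of an RBAC policy granting log and terminal access. It assumes the standard `argocd-rbac-cm` ConfigMap; the role name and group binding are illustrative and not part of the release notes above.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-rbac-cm
  namespace: argocd
data:
  policy.csv: |
    # allow members of role:devops to read pod logs for any application
    p, role:devops, logs, get, */*, allow
    # allow members of role:devops to open a web terminal session in a pod
    p, role:devops, exec, create, */*, allow
    # bind an (illustrative) SSO group to the role
    g, devops-team, role:devops
```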
### OpenTelemetry Tracing Integration

The new OpenTelemetry integration emits richer telemetry data that can make identifying performance bottlenecks easier. It is available for the argocd-server and argocd-repo-server components and can be enabled using the --otlp-address flag.
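As an illustration only (not taken from the release notes), the flag might be wired into the argocd-server Deployment roughly as in the patch sketch below; the collector endpoint is a placeholder assumption.

```yaml
# Illustrative strategic-merge patch excerpt: add --otlp-address to argocd-server
apiVersion: apps/v1
kind: Deployment
metadata:
  name: argocd-server
  namespace: argocd
spec:
  template:
    spec:
      containers:
        - name: argocd-server
          command:
            - argocd-server
            # hypothetical OTLP gRPC collector endpoint; adjust to your environment
            - --otlp-address=otel-collector.observability.svc.cluster.local:4317
```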
### Power PC and IBM Z Support

The list of supported architectures has been expanded and now includes IBM Z (s390x) and PowerPC (ppc64le). Starting with the v2.4 release, the official quay.io repository will carry images for the amd64, arm64, ppc64le, and s390x architectures.

### Other Notable Changes

Overall, the v2.4 release includes more than 300 commits from nearly 90 contributors. Here is a short sample of the contributions:

* Enforce the deployment to remote clusters only
* Native support of GCP authentication for GKE
* Secured Redis connection
* ApplicationSet Gitea support
## v2.3.3 (2022-03-29)

- fix: prevent excessive repo-server disk usage for large repos (#8845) (#8897)
- fix: Set QPS and burst rate for resource ops client (#8915)

## v2.3.2 (2022-03-22)

- fix: application resource APIs must enforce project restrictions

## v2.3.1 (2022-03-10)

- fix: Retry checkbox unchecked unexpectedly; Sync up with YAML (#8682) (#8720)
- chore: Bump stable version of application set addon (#8744)
- fix: correct jsonnet paths resolution (#8721)
- fix(ui): Applications page incorrectly resets to tiles view. Fixes #8702 (#8718)
## v2.3.0 (2022-03-05)

### Argo CD ApplicationSet and Notifications are now part of Argo CD

Two popular [Argoproj Labs](https://github.com/argoproj-labs) projects, [Argo CD ApplicationSet](https://github.com/argoproj/applicationset) and [Argo CD Notifications](https://github.com/argoproj-labs/argocd-notifications), are now part of Argo CD! The default Argo CD installation manifests now bundle both projects out of the box. Going forward, you can expect tighter integration of these projects into Argo CD.
### New sync and diff strategies

Users can now configure the Application resource to instruct Argo CD to respect the configured ignore differences during the sync process. To do so, add the new sync option RespectIgnoreDifferences=true to the Application resource. Once the sync option is added, Argo CD won't change ignored fields during the sync process.

Configuring ignored fields is also easier now. Instead of listing fields one by one, users can leverage the managedFields metadata to tell Argo CD which managers to trust and automatically ignore any fields owned by them. A new diff customization (managedFieldsManagers) is now available, allowing users to specify managers the application should trust and to ignore all fields owned by those managers. Read more about these changes in the [New sync and diff strategies in ArgoCD](https://blog.argoproj.io/new-sync-and-diff-strategies-in-argocd-44195d3f8b8c) blog post.
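As a concrete illustration of the two options described above, here is a minimal Application sketch; the application name, repository URL, and trusted field manager are illustrative assumptions, not taken from the release notes.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: guestbook                  # illustrative name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example/apps.git   # illustrative repository
    path: guestbook
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: guestbook
  ignoreDifferences:
    - group: apps
      kind: Deployment
      # ignore any fields owned by these (trusted) field managers
      managedFieldsManagers:
        - kube-controller-manager
  syncPolicy:
    syncOptions:
      # respect the ignoreDifferences configuration during sync as well
      - RespectIgnoreDifferences=true
```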
### ARM Images

An officially supported ARM64 image is now available. Enjoy running Argo CD on your Raspberry Pi! Additionally, the image size was reduced by nearly 50% and is only 200MB now. The ARM version of the `argocd` CLI is also available and published as a GitHub release artifact.

### Compact Tree View And Click Application Navigation

The application details page now supports a compact visualization of the application resources tree. Using the "Group Nodes" button, you can collapse similar resources into a single group node to remove clutter and make it easier to understand the state of application resources. You can still get detailed information about the collapsed resources by clicking the group node; the list of collapsed resources appears in a sliding panel. Is the compact resource tree still too big? You can use the zoom in and zoom out feature to make it smaller, or even larger!

You no longer need to move back and forth between the application details page and the application list page. Instead, you can navigate directly to the required application by clicking the search icon in the application details page title.
### Upgraded Config Management Tools

Both bundled Helm and Kustomize binaries have been upgraded to the latest versions. Kustomize has been upgraded from 4.2.0 to 4.4.1, and Helm has been upgraded from 3.7.1 to 3.8.0.

### Bug Fixes and Performance Enhancements

* Config management tools enhancements:
    * The skipCrds flag and the ability to ignore missing values files for Helm (#8012, #8003); see the sketch after this list
    * Additional environment variables for Kustomize (#8096)
* Argo CD CLI follows the XDG Base directory standard (#7638)
* Redis is no longer used during SSO login (#8241)
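For the Helm-related options mentioned in the list above, a minimal Application sketch might look like the following; the chart path, repository URL, and values file name are illustrative assumptions.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-chart                   # illustrative name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example/charts.git   # illustrative repository
    path: charts/my-chart
    targetRevision: HEAD
    helm:
      skipCrds: true                  # skip installing CRDs bundled with the chart
      ignoreMissingValueFiles: true   # tolerate value files that are absent in some branches
      valueFiles:
        - values-production.yaml      # illustrative values file
  destination:
    server: https://kubernetes.default.svc
    namespace: my-chart
```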
### Features
|
||||
|
||||
- feat: Add app list and details page views to navigation history (#7776) (#7937)
|
||||
- feat: Add skipCrds flag for helm charts (#8012)
|
||||
- feat: Add visual indicator for newly created pods (#8006)
|
||||
- feat: Added a new Helm option ignoreMissingValueFiles (#7767) (#8003)
|
||||
- feat: Allow configuring system wide ignore differences for all resources (#8224)
|
||||
- feat: Allow escaping dollar in Envsubst (#7961)
|
||||
- feat: Allow external links on Application (#3487) (#8231)
|
||||
- feat: Allow selecting application on detail page (#8176)
|
||||
- feat: Bundle applicationset-controller with argocd (#8148)
|
||||
- feat: Enable specifying root ca for oidc (#6712)
|
||||
- feat: Expose ARGOCD_APP_NAME to the `kustomize build` command (#8096)
|
||||
- feat: Ignore differences owned by trusted managers from managedFields (#7869)
|
||||
- feat: New sync option to use ignore diff configs during sync (#8078)
|
||||
- feat: Provide address flag for admin dashboard command (#8095)
|
||||
- feat: Store "Group Nodes" button state in application details preferences (#8036)
|
||||
- feat: Support specifying cluster by name in addition to API server URL in Cluster API (#8077)
|
||||
- feat: Support XDG Base directory standard (#7638) (#7791)
|
||||
- feat: Use encrypted cookie to store OAuth2 state nonce (instead of redis) (#8241)
|
||||
- feat: Build images on PR and conditionally build arm64 image on push (#8108)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- fix: Add "Restarting MinIO" status to MiniO Tenant health check (#8191)
|
||||
- fix: Add all resources in list view (#7295)
|
||||
- fix: Adding pagination to grouped nodes sliding panel#7837 (#7915)
|
||||
- fix: Allow all resources to add external links (#7923)
|
||||
- fix: Always call ValidateDestination (#7976)
|
||||
- fix: Application exist panic when execute api call (#8188)
|
||||
- fix: Application-icons-alignment (#8054)
|
||||
- fix: Controller panics if resource manifest has incorrect annotation (#8022)
|
||||
- fix: Correctly handle project field during partial cluster update (#7994)
|
||||
- fix: Default value for retry validation #8055 (#8064)
|
||||
- fix: Fix a possible crash when parsing RBAC (#8165)
|
||||
- fix: Grouped node list missing resources on Compact resources view #8014 (#8018)
|
||||
- fix: Issue with headless installation (#7958)
|
||||
- fix: Issue with project scoped resources (#8048)
|
||||
- fix: Kubernetes labels normalization for Prometheus (#7925)
|
||||
- fix: Nested Refresh dropdown does not work on Application Details page #1524 (#7950)
|
||||
- fix: Network line colors and menu icon alignment (#8059)
|
||||
- fix: Opening app details shows UI error on some apps (#8016) (#8019)
|
||||
- fix: Parse to correct uint32 type (#8177)
|
||||
- fix: Prevent possible nil-pointer deref in normalizer (#8185)
|
||||
- fix: Prevent possible out-of-bounds access when loading policies (#8186)
|
||||
- fix: Provide a semantic version parsed version for KUBE_VERSION (#8250)
|
||||
- fix: Refreshing label toast (#7979)
|
||||
- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971)
|
||||
- fix: Retry disabled text (#8004)
|
||||
- fix: Route health check stuck in 'Progressing' (#8170)
|
||||
- fix: Sync window panel is crashed if resource name not contain letters (#8053)
|
||||
- fix: Targetervision compatible without prefix refs/heads or refs/tags (#7939)
|
||||
- fix: Trailing line in Filter Dropdown Menus #7821 (#8001)
|
||||
- fix: Webhook URL matching edge cases (#7981)
|
||||
- fix(ui): Use consistent case for diff modes (#7945)
|
||||
- fix: Use gRPC timeout for sidecar CMPs (#8131) (#8236)
|
||||
|
||||
### Other
|
||||
|
||||
- chore: Bump go-jsonnet to v0.18.0 (#8011)
|
||||
- chore: Escape proj in regex (#7985)
|
||||
- chore: Exclude argocd-server rbac for core-install (#8234)
|
||||
- chore: Log out the resource triggering reconciliation (#8192)
|
||||
- chore: Migrate to use golang-jwt/jwt v4.2.0 (#8136)
|
||||
- chore: Move resolveRevision from api-server to repo-server (#7966)
|
||||
- chore: Update notifications version (#8267)
|
||||
- chore: Update slack version (#8299)
|
||||
- chore: Update to Redis 6.2.4 (#8157)
|
||||
- chore: Upgrade awscli to 2.4.6 and remove python deps (#7947)
|
||||
- chore: Upgrade base image to ubuntu:21.10 (#8230)
|
||||
- chore: Upgrade dex to v2.30.2 (https://github.com/dexidp/dex/issues/2326) (#8237)
|
||||
- chore: Upgrade gitops engine (#8288)
|
||||
- chore: Upgrade golang to 1.17.6 (#8229)
|
||||
- chore: Upgrade helm to most recent version (v3.7.2) (#8226)
|
||||
- chore: Upgrade k8s client to v1.23 (#8213)
|
||||
- chore: Upgrade kustomize to most recent version (v4.4.1) (#8227)
|
||||
- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133)
|
||||
- refactor: Move project filtering to server side (#8102)
|
||||
|
||||
## v2.2.3 (2022-01-18)
|
||||
|
||||
- fix: Application exist panic when execute api call (#8188)
|
||||
- fix: Route health check stuck in 'Progressing' (#8170)
|
||||
- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133)
|
||||
- chore: Update to Redis 6.2.4 (#8157) (#8158)
|
||||
|
||||
## v2.2.2 (2021-12-31)
|
||||
|
||||
- fix: Issue with project scoped resources (#8048)
|
||||
- fix: Escape proj in regex (#7985)
|
||||
- fix: Default value for retry validation #8055 (#8064)
|
||||
- fix: Sync window panel is crashed if resource name not contain letters (#8053)
|
||||
- fix: Upgrade github.com/argoproj/gitops-engine to v0.5.2
|
||||
- fix: Retry disabled text (#8004)
|
||||
- fix: Opening app details shows UI error on some apps (#8016) (#8019)
|
||||
- fix: Correctly handle project field during partial cluster update (#7994)
|
||||
- fix: Cluster API does not support updating labels and annotations (#7901)
|
||||
|
||||
## v2.2.1 (2021-12-16)
|
||||
|
||||
- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971)
|
||||
- fix: Issue with headless installation (#7958)
|
||||
- fix: Nil pointer (#7905)
|
||||
|
||||
## v2.2.0 (2021-12-14)
|
||||
|
||||
> [Upgrade instructions](./docs/operator-manual/upgrading/2.1-2.2.md)
|
||||
|
||||
Dockerfile: 80 lines changed
@@ -1,16 +1,17 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:21.10
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.17.6 as builder
|
||||
FROM docker.io/library/golang:1.18 AS builder
|
||||
|
||||
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
openssh-server \
|
||||
nginx \
|
||||
unzip \
|
||||
fcgiwrap \
|
||||
git \
|
||||
git-lfs \
|
||||
@@ -24,20 +25,16 @@ RUN apt-get update && apt-get install -y \
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
ADD hack/install.sh .
|
||||
ADD hack/installers installers
|
||||
ADD hack/tool-versions.sh .
|
||||
COPY hack/install.sh hack/tool-versions.sh ./
|
||||
COPY hack/installers installers
|
||||
|
||||
RUN ./install.sh ksonnet-linux
|
||||
RUN ./install.sh helm2-linux
|
||||
RUN ./install.sh helm-linux
|
||||
RUN ./install.sh kustomize-linux
|
||||
RUN ./install.sh awscli-linux
|
||||
RUN ./install.sh helm-linux && \
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD Base - used as the base for both the release and dev argocd images
|
||||
####################################################################################################
|
||||
FROM $BASE_IMAGE as argocd-base
|
||||
FROM $BASE_IMAGE AS argocd-base
|
||||
|
||||
USER root
|
||||
|
||||
@@ -50,35 +47,31 @@ RUN groupadd -g 999 argocd && \
|
||||
chmod g=u /home/argocd && \
|
||||
apt-get update && \
|
||||
apt-get dist-upgrade -y && \
|
||||
apt-get install -y git git-lfs tini gpg tzdata && \
|
||||
apt-get install -y \
|
||||
git git-lfs tini gpg tzdata && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
|
||||
COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh
|
||||
COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh
|
||||
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
|
||||
COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2
|
||||
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
|
||||
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
|
||||
COPY --from=builder /usr/local/aws-cli/v2/current/dist /usr/local/aws-cli/v2/current/dist
|
||||
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
# keep uid_entrypoint.sh for backward compatibility
|
||||
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
|
||||
RUN ln -s /usr/local/aws-cli/v2/current/dist/aws /usr/local/bin/aws
|
||||
|
||||
# support for mounting configuration from a configmap
|
||||
RUN mkdir -p /app/config/ssh && \
|
||||
touch /app/config/ssh/ssh_known_hosts && \
|
||||
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
|
||||
WORKDIR /app/config/ssh
|
||||
RUN touch ssh_known_hosts && \
|
||||
ln -s ssh_known_hosts /etc/ssh/ssh_known_hosts
|
||||
|
||||
RUN mkdir -p /app/config/tls
|
||||
RUN mkdir -p /app/config/gpg/source && \
|
||||
mkdir -p /app/config/gpg/keys && \
|
||||
chown argocd /app/config/gpg/keys && \
|
||||
chmod 0700 /app/config/gpg/keys
|
||||
WORKDIR /app/config
|
||||
RUN mkdir -p tls && \
|
||||
mkdir -p gpg/source && \
|
||||
mkdir -p gpg/keys && \
|
||||
chown argocd gpg/keys && \
|
||||
chmod 0700 gpg/keys
|
||||
|
||||
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
|
||||
ENV USER=argocd
|
||||
|
||||
USER 999
|
||||
@@ -87,14 +80,15 @@ WORKDIR /home/argocd
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM docker.io/library/node:12.18.4 as argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
ADD ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
|
||||
RUN yarn install --network-timeout 100000
|
||||
RUN yarn install --network-timeout 200000 && \
|
||||
yarn cache clean
|
||||
|
||||
ADD ["ui/", "."]
|
||||
COPY ["ui/", "."]
|
||||
|
||||
ARG ARGO_VERSION=latest
|
||||
ENV ARGO_VERSION=$ARGO_VERSION
|
||||
@@ -103,19 +97,19 @@ RUN HOST_ARCH='amd64' NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTION
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.17.6 as argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.18 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
COPY go.mod go.mod
|
||||
COPY go.sum go.sum
|
||||
|
||||
COPY go.* ./
|
||||
RUN go mod download
|
||||
|
||||
# Perform the build
|
||||
COPY . .
|
||||
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
|
||||
RUN make argocd-all
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make argocd-all
|
||||
|
||||
####################################################################################################
|
||||
# Final image
|
||||
@@ -124,11 +118,13 @@ FROM argocd-base
|
||||
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
|
||||
|
||||
USER root
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
|
||||
|
||||
USER 999
|
||||
|
||||
@@ -5,9 +5,11 @@ FROM argocd-base
|
||||
COPY argocd /usr/local/bin/
|
||||
|
||||
USER root
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller
|
||||
|
||||
USER 999
|
||||
|
||||
Makefile: 44 lines changed
@@ -51,6 +51,7 @@ ARGOCD_E2E_TEST_TIMEOUT?=30m
|
||||
|
||||
ARGOCD_IN_CI?=false
|
||||
ARGOCD_TEST_E2E?=true
|
||||
ARGOCD_BIN_MODE?=true
|
||||
|
||||
ARGOCD_LINT_GOGC?=20
|
||||
|
||||
@@ -135,7 +136,6 @@ override LDFLAGS += \
|
||||
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
|
||||
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
|
||||
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
|
||||
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
|
||||
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
|
||||
|
||||
ifeq (${STATIC_BUILD}, true)
|
||||
@@ -179,7 +179,7 @@ gogen: ensure-gopath
|
||||
go generate ./util/argo/...
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: ensure-gopath
|
||||
protogen: ensure-gopath mod-vendor-local
|
||||
export GO111MODULE=off
|
||||
./hack/generate-proto.sh
|
||||
|
||||
@@ -229,11 +229,13 @@ gen-resources-cli-local: clean-debug
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
|
||||
.PHONY: release-cli
|
||||
release-cli: clean-debug
|
||||
release-cli: clean-debug build-ui
|
||||
make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all
|
||||
make BIN_NAME=argocd-darwin-arm64 GOOS=darwin GOARCH=arm64 argocd-all
|
||||
make BIN_NAME=argocd-linux-amd64 GOOS=linux argocd-all
|
||||
make BIN_NAME=argocd-linux-arm64 GOOS=linux GOARCH=arm64 argocd-all
|
||||
make BIN_NAME=argocd-linux-ppc64le GOOS=linux GOARCH=ppc64le argocd-all
|
||||
make BIN_NAME=argocd-linux-s390x GOOS=linux GOARCH=s390x argocd-all
|
||||
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all
|
||||
|
||||
.PHONY: test-tools-image
|
||||
@@ -266,17 +268,20 @@ repo-server:
|
||||
controller:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
|
||||
.PHONY: build-ui
|
||||
build-ui:
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-ui --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
|
||||
.PHONY: image
|
||||
ifeq ($(DEV_IMAGE), true)
|
||||
# The "dev" image builds the binaries from the users desktop environment (instead of in Docker)
|
||||
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
|
||||
# the dist directory is under .dockerignore.
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image:
|
||||
docker build -t argocd-base --target argocd-base .
|
||||
docker build -t argocd-ui --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
@@ -287,7 +292,7 @@ image:
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
image:
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
|
||||
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
|
||||
endif
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
|
||||
@@ -415,10 +420,11 @@ start-e2e: test-tools-image
|
||||
|
||||
# Starts e2e server locally (or within a container)
|
||||
.PHONY: start-e2e-local
|
||||
start-e2e-local:
|
||||
start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
kubectl create ns argocd-e2e || true
|
||||
kubectl config set-context --current --namespace=argocd-e2e
|
||||
kustomize build test/manifests/base | kubectl apply -f -
|
||||
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
|
||||
# Create GPG keys and source directories
|
||||
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
|
||||
@@ -431,9 +437,11 @@ start-e2e-local:
|
||||
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
|
||||
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_E2E_DISABLE_AUTH=false \
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
BIN_MODE=$(ARGOCD_BIN_MODE) \
|
||||
ARGOCD_E2E_TEST=true \
|
||||
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
|
||||
|
||||
@@ -506,10 +514,6 @@ serve-docs-local:
|
||||
serve-docs:
|
||||
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
|
||||
|
||||
.PHONY: lint-docs
|
||||
lint-docs:
|
||||
# https://github.com/dkhamsing/awesome_bot
|
||||
find docs -name '*.md' -exec grep -l http {} + | xargs docker run --rm -v $(PWD):/mnt:ro dkhamsing/awesome_bot -t 3 --allow-dupe --allow-redirect --allow-timeout --allow-ssl --allow 502,500,429,400 --white-list `cat docs/url-allow-list | grep -v "#" | tr "\n" ','` --skip-save-results --
|
||||
|
||||
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
@@ -531,9 +535,7 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
|
||||
# Installs all tools required for running unit & end-to-end tests (Linux packages)
|
||||
.PHONY: install-test-tools-local
|
||||
install-test-tools-local:
|
||||
./hack/install.sh kustomize-linux
|
||||
./hack/install.sh ksonnet-linux
|
||||
./hack/install.sh helm2-linux
|
||||
./hack/install.sh kustomize
|
||||
./hack/install.sh helm-linux
|
||||
|
||||
# Installs all tools required for running codegen (Linux packages)
|
||||
@@ -559,3 +561,11 @@ start-test-k8s:
|
||||
.PHONY: list
|
||||
list:
|
||||
@LC_ALL=C $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$'
|
||||
|
||||
.PHONY: applicationset-controller
|
||||
applicationset-controller:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
|
||||
.PHONY: checksums
|
||||
checksums:
|
||||
for f in ./dist/$(BIN_NAME)-*; do openssl dgst -sha256 "$$f" | awk ' { print $$2 }' > "$$f".sha256 ; done
|
||||
|
||||
OWNERS: 8 lines changed
@@ -8,20 +8,22 @@ approvers:
|
||||
- jannfis
|
||||
- jessesuen
|
||||
- jgwest
|
||||
- keithchong
|
||||
- mayzhang2000
|
||||
- rbreeze
|
||||
- leoluz
|
||||
- crenshaw-dev
|
||||
- pasha-codefresh
|
||||
|
||||
reviewers:
|
||||
- dthomson25
|
||||
- tetchel
|
||||
- terrytangyuan
|
||||
- wtam2018
|
||||
- ishitasequeira
|
||||
- reginapizza
|
||||
- hblixt
|
||||
- chetan-rns
|
||||
- wanghong230
|
||||
- pasha-codefresh
|
||||
- ciiay
|
||||
- leoluz
|
||||
- crenshaw-dev
|
||||
- saumeya
|
||||
|
||||
Procfile: 11 lines changed
@@ -1,9 +1,12 @@
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.6-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:7.0.0-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"

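Several of the rewritten Procfile entries append `--otlp-address=${ARGOCD_OTLP_ADDRESS}`, so the locally run components can export traces to an OpenTelemetry collector. A minimal sketch of how that might be exercised, assuming a goreman-style launcher for the Procfile and a collector on the conventional OTLP gRPC port (neither detail is specified by this diff):

```sh
# Assumed workflow: export the collector address before starting the Procfile processes.
export ARGOCD_OTLP_ADDRESS=localhost:4317   # assumed OTLP gRPC endpoint
goreman start                               # assumed launcher for this Procfile
```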
13
README.md
@@ -1,8 +1,4 @@
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
[](https://argoproj.github.io/community/join-slack)
[](https://codecov.io/gh/argoproj/argo-cd)
[](https://github.com/argoproj/argo-cd/releases/latest)
[](https://bestpractices.coreinfrastructure.org/projects/4486)
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [](https://argoproj.github.io/community/join-slack) [](https://codecov.io/gh/argoproj/argo-cd) [](https://github.com/argoproj/argo-cd/releases/latest) [](https://bestpractices.coreinfrastructure.org/projects/4486) [](https://twitter.com/argoproj)

# Argo CD - Declarative Continuous Delivery for Kubernetes

@@ -36,8 +32,8 @@ Check live demo at https://cd.apps.argoproj.io/.

* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)
* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
* User Community meeting: [Every other Wednesday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)


Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
@@ -73,3 +69,6 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)

10
SECURITY.md
@@ -6,7 +6,7 @@ Version: **v1.4 (2022-01-23)**

As a deployment tool, Argo CD needs to have production access which makes
security a very important topic. The Argoproj team takes security very
seriously and is continuously working on improving it.
seriously and is continuously working on improving it.

## A word about security scanners

@@ -60,17 +60,17 @@ We will do our best to react quickly on your inquiry, and to coordinate a fix
and disclosure with you. Sometimes, it might take a little longer for us to
react (e.g. out of office conditions), so please bear with us in these cases.

We will publish security advisiories using the
We will publish security advisories using the
[Git Hub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well informed, and will credit you for your
findings (unless you prefer to stay anonymous, of course).

Please report vulnerabilities by e-mail to the following address:
Please report vulnerabilities by e-mail to the following address:

* cncf-argo-security@lists.cncf.io

## Securing your Argo CD Instance

See the [operator manual security page](docs/operator-manual/security.md) for
additional information about Argo CD's security features and how to make your
See the [operator manual security page](docs/operator-manual/security.md) for
additional information about Argo CD's security features and how to make your
Argo CD production ready.

76
USERS.md
@@ -13,8 +13,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Allianz Direct](https://www.allianzdirect.de/)
1. [Ambassador Labs](https://www.getambassador.io/)
1. [Ant Group](https://www.antgroup.com/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
@@ -23,25 +23,31 @@ Currently, the following organizations are **officially** using Argo CD:
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
1. [Beez Innovation Labs](https://www.beezlabs.com/)
1. [BioBox Analytics](https://biobox.io)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [BigPanda](https://bigpanda.io)
1. [BioBox Analytics](https://biobox.io)
1. [BMW Group](https://www.bmwgroup.com/)
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Camptocamp](https://camptocamp.com)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [Chargetrip](https://chargetrip.com)
1. [Chime](https://www.chime.com)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Codefresh](https://www.codefresh.io/)
1. [Codility](https://www.codility.com/)
1. [Commonbond](https://commonbond.co/)
1. [Crédit Agricole CIB](https://www.ca-cib.com)
1. [CROZ d.o.o.](https://croz.net/)
1. [Crédit Agricole CIB](https://www.ca-cib.com)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cybozu](https://cybozu-global.com)
1. [Chargetrip](https://chargetrip.com)
1. [D2iQ](https://www.d2iq.com)
1. [Datarisk](https://www.datarisk.io/)
1. [Deloitte](https://www.deloitte.com/)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
@@ -49,16 +55,18 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Elium](https://www.elium.com)
1. [END.](https://www.endclothing.com/)
1. [Energisme](https://energisme.com/)
1. [Faro](https://www.faro.com/)
1. [Fave](https://myfave.com)
1. [Flip](https://flip.id)
1. [Fonoa](https://www.fonoa.com/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Future PLC](https://www.futureplc.com/)
1. [Garner](https://www.garnercorp.com)
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
1. [Garner](https://www.garnercorp.com)
1. [Generali Deutschland AG](https://www.generali.de/)
1. [Gitpod](https://www.gitpod.io)
1. [Glovo](https://www.glovoapp.com)
1. [Gllue](https://gllue.com)
1. [Glovo](https://www.glovoapp.com)
1. [GMETRI](https://gmetri.com/)
1. [Gojek](https://www.gojek.io/)
1. [Greenpass](https://www.greenpass.com.br/)
@@ -71,25 +79,34 @@ Currently, the following organizations are **officially** using Argo CD:
1. [IBM](https://www.ibm.com/)
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
1. [imaware](https://imaware.health)
1. [Index Exchange](https://www.indexexchange.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
1. [Joblift](https://joblift.com/)
1. [JovianX](https://www.jovianx.com/)
1. [Karrot](https://www.daangn.com/)
1. [Kaltura](https://corp.kaltura.com/)
1. [KarrotPay](https://www.daangnpay.com/)
1. [Karrot](https://www.daangn.com/)
1. [Kasa](https://kasa.co.kr/)
1. [Keeeb](https://www.keeeb.com/)
1. [Keptn](https://keptn.sh)
1. [Kinguin](https://www.kinguin.net/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
1. [KubeSphere](https://github.com/kubesphere)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [Lightricks](https://www.lightricks.com/)
1. [LINE](https://linecorp.com/en/)
1. [Lytt](https://www.lytt.co/)
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
1. [Major League Baseball](https://mlb.com)
1. [Mambu](https://www.mambu.com/)
1. [MariaDB](https://mariadb.com)
1. [Mattermost](https://www.mattermost.com)
1. [Max Kelsen](https://www.maxkelsen.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
1. [mixi Group](https://mixi.co.jp/)
@@ -111,6 +128,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Opensurvey](https://www.opensurvey.co.kr/)
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [p3r](https://www.p3r.one/)
1. [Packlink](https://www.packlink.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
@@ -124,15 +142,24 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Quipper](https://www.quipper.com/)
1. [Recreation.gov](https://www.recreation.gov/)
1. [Red Hat](https://www.redhat.com/)
1. [RightRev](https://rightrev.com/)
1. [Rise](https://www.risecard.eu/)
1. [Riskified](https://www.riskified.com/)
1. [Robotinfra](https://www.robotinfra.com)
1. [Rubin Observatory](https://www.lsst.org)
1. [Saildrone](https://www.saildrone.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smilee.io](https://smilee.io)
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)
1. [Speee](https://speee.jp/)
1. [Spendesk](https://spendesk.com/)
1. [Spores Labs](https://spores.app)
1. [Stuart](https://stuart.com/)
1. [Sumo Logic](https://sumologic.com/)
1. [Sutpc](http://www.sutpc.com/)
1. [Swiss Post](https://github.com/swisspost)
@@ -141,6 +168,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Syncier](https://syncier.com/)
1. [TableCheck](https://tablecheck.com/)
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [Technacy](https://www.technacy.it/)
1. [Tesla](https://tesla.com/)
1. [ThousandEyes](https://www.thousandeyes.com/)
1. [Ticketmaster](https://ticketmaster.com)
@@ -150,9 +179,10 @@ Currently, the following organizations are **officially** using Argo CD:
1. [tru.ID](https://tru.id)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [ungleich.ch](https://ungleich.ch/)
1. [UBIO](https://ub.io/)
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
1. [ungleich.ch](https://ungleich.ch/)
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Viaduct](https://www.viaduct.ai/)
1. [Virtuo](https://www.govirtuo.com/)
@@ -160,38 +190,18 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Volvo Cars](https://www.volvocars.com/)
1. [VSHN - The DevOps Company](https://vshn.ch/)
1. [Walkbase](https://www.walkbase.com/)
1. [Webstores](https://www.webstores.nl)
1. [Wehkamp](https://www.wehkamp.nl/)
1. [WeMo Scooter](https://www.wemoscooter.com/)
1. [Webstores](https://www.webstores.nl)
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
1. [Witick](https://witick.io/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [WSpot](https://www.wspot.com.br/)
1. [Yieldlab](https://www.yieldlab.de/)
1. [Zimpler](https://www.zimpler.com/)
1. [Sap Labs](http://sap.com)
1. [Smilee.io](https://smilee.io)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [Kaltura](https://corp.kaltura.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [MariaDB](https://mariadb.com)
1. [Lightricks](https://www.lightricks.com/)
1. [RightRev](https://rightrev.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Snapp](https://snapp.ir/)
1. [Technacy](https://www.technacy.it/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Youverify](https://youverify.co/)
1. [Keeeb](https://www.keeeb.com/)
1. [p3r](https://www.p3r.one/)
1. [Faro](https://www.faro.com/)
1. [Rise](https://www.risecard.eu/)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
1. [Skyscanner](https://www.skyscanner.net/)
1. [Casavo](https://casavo.com)
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
1. [Yubo](https://www.yubo.live/)
1. [Zimpler](https://www.zimpler.com/)
1. [ZOZO](https://corp.zozo.com/)
1. [Trendyol](https://www.trendyol.com/)
1. [RapidAPI](https://www.rapidapi.com/)

724
applicationset/controllers/applicationset_controller.go
Normal file
@@ -0,0 +1,724 @@
|
||||
/*
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
|
||||
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// Rather than importing the whole argocd-notifications controller, just copying the const here
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/subscriptions.go#L12
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
|
||||
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
|
||||
ReconcileRequeueOnValidationError = time.Minute * 3
|
||||
)
|
||||
|
||||
// ApplicationSetReconciler reconciles a ApplicationSet object
|
||||
type ApplicationSetReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
Generators map[string]generators.Generator
|
||||
ArgoDB db.ArgoDB
|
||||
ArgoAppClientset appclientset.Interface
|
||||
KubeClientset kubernetes.Interface
|
||||
utils.Policy
|
||||
utils.Renderer
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch
|
||||
|
||||
func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = r.Log.WithValues("applicationset", req.NamespacedName)
|
||||
_ = log.WithField("applicationset", req.NamespacedName)
|
||||
|
||||
var applicationSetInfo argoprojiov1alpha1.ApplicationSet
|
||||
parametersGenerated := false
|
||||
|
||||
if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
log.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err)
|
||||
}
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
|
||||
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Log a warning if there are unrecognized generators
|
||||
utils.CheckInvalidGenerators(&applicationSetInfo)
|
||||
// desiredApplications is the main list of all expected Applications from all generators in this appset.
|
||||
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: string(applicationSetReason),
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
parametersGenerated = true
|
||||
|
||||
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo, req.Namespace)
|
||||
if err != nil {
|
||||
// While some generators may return an error that requires user intervention,
|
||||
// other generators reference external resources that may change to cause
|
||||
// the error to no longer occur. We thus log the error and requeue
|
||||
// with a timeout to give this another shot at a later time.
|
||||
//
|
||||
// Changes to watched resources will cause this to be reconciled sooner than
|
||||
// the RequeueAfter time.
|
||||
log.Errorf("error occurred during application validation: %s", err.Error())
|
||||
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range desiredApplications {
|
||||
if validateErrors[i] == nil {
|
||||
validApps = append(validApps, desiredApplications[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(validateErrors) > 0 {
|
||||
var message string
|
||||
for _, v := range validateErrors {
|
||||
message = v.Error()
|
||||
log.Errorf("validation error found during application validation: %s", message)
|
||||
}
|
||||
if len(validateErrors) > 1 {
|
||||
// Only the last message gets added to the appset status, to keep the size reasonable.
|
||||
message = fmt.Sprintf("%s (and %d more)", message, len(validateErrors)-1)
|
||||
}
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: message,
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
}
|
||||
|
||||
if r.Policy.Update() {
|
||||
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonUpdateApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else {
|
||||
err = r.createInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonCreateApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if r.Policy.Delete() {
|
||||
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonDeleteApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if applicationSetInfo.RefreshRequired() {
|
||||
delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
|
||||
err := r.Client.Update(ctx, &applicationSetInfo)
|
||||
if err != nil {
|
||||
log.Warnf("error occurred while updating ApplicationSet: %v", err)
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonRefreshApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
|
||||
log.WithField("requeueAfter", requeueAfter).Info("end reconcile")
|
||||
|
||||
if len(validateErrors) == 0 {
|
||||
if err := r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{
|
||||
RequeueAfter: requeueAfter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getParametersGeneratedCondition(parametersGenerated bool, message string) argoprojiov1alpha1.ApplicationSetCondition {
|
||||
var paramtersGeneratedCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
if parametersGenerated {
|
||||
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonParametersGenerated,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
} else {
|
||||
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: message,
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonErrorOccurred,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
return paramtersGeneratedCondition
|
||||
}
|
||||
|
||||
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argoprojiov1alpha1.ApplicationSetCondition {
|
||||
var resourceUpToDateCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
if errorOccurred {
|
||||
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: message,
|
||||
Reason: reason,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
} else {
|
||||
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "ApplicationSet up to date",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
}
|
||||
return resourceUpToDateCondition
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argoprojiov1alpha1.ApplicationSet, condition argoprojiov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
|
||||
// check if error occurred during reconcile process
|
||||
errOccurred := condition.Type == argoprojiov1alpha1.ApplicationSetConditionErrorOccurred
|
||||
|
||||
var errOccurredCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
|
||||
if errOccurred {
|
||||
errOccurredCondition = condition
|
||||
} else {
|
||||
errOccurredCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
|
||||
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
|
||||
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
|
||||
|
||||
newConditions := []argoprojiov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
|
||||
|
||||
needToUpdateConditions := false
|
||||
for _, condition := range newConditions {
|
||||
// do nothing if appset already has same condition
|
||||
for _, c := range applicationSet.Status.Conditions {
|
||||
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
|
||||
needToUpdateConditions = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
evaluatedTypes := map[argoprojiov1alpha1.ApplicationSetConditionType]bool{
|
||||
argoprojiov1alpha1.ApplicationSetConditionErrorOccurred: true,
|
||||
argoprojiov1alpha1.ApplicationSetConditionParametersGenerated: true,
|
||||
argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
|
||||
}
|
||||
|
||||
if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 {
|
||||
// fetch updated Application Set object before updating it
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %v", err)
|
||||
}
|
||||
|
||||
applicationSet.Status.SetConditions(
|
||||
newConditions, evaluatedTypes,
|
||||
)
|
||||
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, applicationSet)
|
||||
if err != nil && !apierr.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
|
||||
// generated applications.
|
||||
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argoprojiov1alpha1.ApplicationSet, namespace string) (map[int]error, error) {
|
||||
errorsByIndex := map[int]error{}
|
||||
namesSet := map[string]bool{}
|
||||
for i, app := range desiredApplications {
|
||||
|
||||
if !namesSet[app.Name] {
|
||||
namesSet[app.Name] = true
|
||||
} else {
|
||||
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, namespace); err != nil {
|
||||
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(conditions) > 0 {
|
||||
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return errorsByIndex, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argoprojiov1alpha1.ApplicationSet) time.Duration {
|
||||
var res time.Duration
|
||||
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
|
||||
|
||||
relevantGenerators := generators.GetRelevantGenerators(&requestedGenerator, r.Generators)
|
||||
|
||||
for _, g := range relevantGenerators {
|
||||
t := g.GetRequeueAfter(&requestedGenerator)
|
||||
|
||||
if res == 0 {
|
||||
res = t
|
||||
} else if t != 0 && t < res {
|
||||
res = t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func getTempApplication(applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) *argov1alpha1.Application {
|
||||
var tmplApplication argov1alpha1.Application
|
||||
tmplApplication.Annotations = applicationSetTemplate.Annotations
|
||||
tmplApplication.Labels = applicationSetTemplate.Labels
|
||||
tmplApplication.Namespace = applicationSetTemplate.Namespace
|
||||
tmplApplication.Name = applicationSetTemplate.Name
|
||||
tmplApplication.Spec = applicationSetTemplate.Spec
|
||||
tmplApplication.Finalizers = applicationSetTemplate.Finalizers
|
||||
|
||||
return &tmplApplication
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argoprojiov1alpha1.ApplicationSetReasonType, error) {
|
||||
var res []argov1alpha1.Application
|
||||
|
||||
var firstError error
|
||||
var applicationSetReason argoprojiov1alpha1.ApplicationSetReasonType
|
||||
|
||||
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
|
||||
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonApplicationParamsGenerationError
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for _, a := range t {
|
||||
tmplApplication := getTempApplication(a.Template)
|
||||
|
||||
for _, p := range a.Params {
|
||||
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonRenderTemplateParamsError
|
||||
}
|
||||
continue
|
||||
}
|
||||
res = append(res, *app)
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
|
||||
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
|
||||
}
|
||||
|
||||
return res, applicationSetReason, firstError
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
|
||||
// grab the job object, extract the owner...
|
||||
app := rawObj.(*argov1alpha1.Application)
|
||||
owner := metav1.GetControllerOf(app)
|
||||
if owner == nil {
|
||||
return nil
|
||||
}
|
||||
// ...make sure it's a application set...
|
||||
if owner.APIVersion != argoprojiov1alpha1.GroupVersion.String() || owner.Kind != "ApplicationSet" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ...and if so, return it
|
||||
return []string{owner.Name}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&argoprojiov1alpha1.ApplicationSet{}).
|
||||
Owns(&argov1alpha1.Application{}).
|
||||
Watches(
|
||||
&source.Kind{Type: &corev1.Secret{}},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
}).
|
||||
// TODO: also watch Applications and respond on changes if we own them.
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
// createOrUpdateInCluster will create / update application resources in the cluster.
|
||||
// - For new applications, it will call create
|
||||
// - For existing application, it will call update
|
||||
// The function also adds owner reference to all applications, and uses it to delete them.
|
||||
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
|
||||
var firstError error
|
||||
// Creates or updates the application in appList
|
||||
for _, generatedApp := range desiredApplications {
|
||||
|
||||
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
|
||||
generatedApp.Namespace = applicationSet.Namespace
|
||||
|
||||
found := &argov1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: generatedApp.Name,
|
||||
Namespace: generatedApp.Namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
}
|
||||
|
||||
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
// Preserve argo cd notifications state (https://github.com/argoproj/applicationset/issues/180)
|
||||
if state, exists := found.ObjectMeta.Annotations[NotifiedAnnotationKey]; exists {
|
||||
if generatedApp.Annotations == nil {
|
||||
generatedApp.Annotations = map[string]string{}
|
||||
}
|
||||
generatedApp.Annotations[NotifiedAnnotationKey] = state
|
||||
}
|
||||
found.ObjectMeta.Annotations = generatedApp.Annotations
|
||||
|
||||
found.ObjectMeta.Finalizers = generatedApp.Finalizers
|
||||
found.ObjectMeta.Labels = generatedApp.Labels
|
||||
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
|
||||
appLog.Logf(log.InfoLevel, "%s Application", action)
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
|
||||
// createInCluster will filter from the desiredApplications only the application that needs to be created
|
||||
// Then it will call createOrUpdateInCluster to do the actual create
|
||||
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
|
||||
var createApps []argov1alpha1.Application
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will holds the app names that are current in the cluster
|
||||
|
||||
for _, app := range current {
|
||||
m[app.Name] = true
|
||||
}
|
||||
|
||||
// filter applications that are not in m[string]bool (new to the cluster)
|
||||
for _, app := range desiredApplications {
|
||||
_, exists := m[app.Name]
|
||||
|
||||
if !exists {
|
||||
createApps = append(createApps, app)
|
||||
}
|
||||
}
|
||||
|
||||
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
|
||||
// TODO: Should this use the context param?
|
||||
var current argov1alpha1.ApplicationList
|
||||
err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return current.Items, nil
|
||||
}
|
||||
|
||||
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
|
||||
// The function must be called after all generators had been called and generated applications
|
||||
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
|
||||
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
|
||||
// clusterList, err := argoDB.ListClusters(ctx)
|
||||
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, applicationSet.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save current applications to be able to delete the ones that are not in appList
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will holds the app names in appList for the deletion process
|
||||
|
||||
for _, app := range desiredApplications {
|
||||
m[app.Name] = true
|
||||
}
|
||||
|
||||
// Delete apps that are not in m[string]bool
|
||||
var firstError error
|
||||
for _, app := range current {
|
||||
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
|
||||
_, exists := m[app.Name]
|
||||
|
||||
if !exists {
|
||||
|
||||
// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
|
||||
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
|
||||
if err != nil {
|
||||
appLog.WithError(err).Error("failed to update Application")
|
||||
if firstError != nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = r.Client.Delete(ctx, &app)
|
||||
if err != nil {
|
||||
appLog.WithError(err).Error("failed to delete Application")
|
||||
if firstError != nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
|
||||
appLog.Log(log.InfoLevel, "Deleted application")
|
||||
}
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
|
||||
// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
|
||||
func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, app *argov1alpha1.Application, clusterList *argov1alpha1.ClusterList, appLog *log.Entry) error {
|
||||
|
||||
// Only check if the finalizers need to be removed IF there are finalizers to remove
|
||||
if len(app.Finalizers) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var validDestination bool
|
||||
|
||||
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, applicationSet.Namespace); err != nil {
|
||||
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
|
||||
validDestination = false
|
||||
} else {
|
||||
|
||||
// Detect if the destination's server field does not match an existing cluster
|
||||
|
||||
matchingCluster := false
|
||||
for _, cluster := range clusterList.Items {
|
||||
|
||||
// Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable.
|
||||
if app.Spec.Destination.Server != cluster.Server {
|
||||
continue
|
||||
}
|
||||
|
||||
// The name must match, if it is not empty
|
||||
if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
matchingCluster = true
|
||||
break
|
||||
}
|
||||
|
||||
if !matchingCluster {
|
||||
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
|
||||
}
|
||||
|
||||
validDestination = matchingCluster
|
||||
}
|
||||
// If the destination is invalid (for example the cluster is no longer defined), then remove
|
||||
// the application finalizers to avoid triggering Argo CD bug #5817
|
||||
if !validDestination {
|
||||
|
||||
// Filter out the Argo CD finalizer from the finalizer list
|
||||
var newFinalizers []string
|
||||
for _, existingFinalizer := range app.Finalizers {
|
||||
if existingFinalizer != argov1alpha1.ResourcesFinalizerName { // only remove this one
|
||||
newFinalizers = append(newFinalizers, existingFinalizer)
|
||||
}
|
||||
}
|
||||
|
||||
// If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app
|
||||
if len(newFinalizers) != len(app.Finalizers) {
|
||||
app.Finalizers = newFinalizers
|
||||
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name)
|
||||
appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination")
|
||||
|
||||
err := r.Client.Update(ctx, app, &client.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ handler.EventHandler = &clusterSecretEventHandler{}
|
||||
1927
applicationset/controllers/applicationset_controller_test.go
Normal file
File diff suppressed because it is too large
84
applicationset/controllers/clustereventhandler.go
Normal file
@@ -0,0 +1,84 @@
package controllers

import (
	"context"

	log "github.com/sirupsen/logrus"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

// clusterSecretEventHandler is used when watching Secrets to check if they are ArgoCD Cluster Secrets, and if so
// requeue any related ApplicationSets.
type clusterSecretEventHandler struct {
	//handler.EnqueueRequestForOwner
	Log    log.FieldLogger
	Client client.Client
}

func (h *clusterSecretEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
	h.queueRelatedAppGenerators(q, e.Object)
}

func (h *clusterSecretEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
	h.queueRelatedAppGenerators(q, e.ObjectNew)
}

func (h *clusterSecretEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
	h.queueRelatedAppGenerators(q, e.Object)
}

func (h *clusterSecretEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
	h.queueRelatedAppGenerators(q, e.Object)
}

// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allow us to easily mock
// it for testing purposes.
type addRateLimitingInterface interface {
	Add(item interface{})
}

func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {

	// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
	if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
		return
	}

	h.Log.WithFields(log.Fields{
		"namespace": object.GetNamespace(),
		"name":      object.GetName(),
	}).Info("processing event for cluster secret")

	appSetList := &argoprojiov1alpha1.ApplicationSetList{}
	err := h.Client.List(context.Background(), appSetList)
	if err != nil {
		h.Log.WithError(err).Error("unable to list ApplicationSets")
		return
	}

	h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
	for _, appSet := range appSetList.Items {

		foundClusterGenerator := false
		for _, generator := range appSet.Spec.Generators {
			if generator.Clusters != nil {
				foundClusterGenerator = true
				break
			}
		}
		if foundClusterGenerator {

			// TODO: only queue the AppGenerator if the labels match this cluster
			req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: appSet.Namespace, Name: appSet.Name}}
			q.Add(req)
		}
	}
}
234
applicationset/controllers/clustereventhandler_test.go
Normal file
@@ -0,0 +1,234 @@
package controllers

import (
	"testing"

	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

func TestClusterEventHandler(t *testing.T) {

	scheme := runtime.NewScheme()
	err := argoprojiov1alpha1.AddToScheme(scheme)
	assert.Nil(t, err)

	err = argov1alpha1.AddToScheme(scheme)
	assert.Nil(t, err)

	tests := []struct {
		name             string
		items            []argoprojiov1alpha1.ApplicationSet
		secret           corev1.Secret
		expectedRequests []ctrl.Request
	}{
		{
			name:  "no application sets should mean no requests",
			items: []argoprojiov1alpha1.ApplicationSet{},
			secret: corev1.Secret{
				ObjectMeta: v1.ObjectMeta{
					Namespace: "argocd",
					Name:      "my-secret",
					Labels: map[string]string{
						generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
					},
				},
			},
			expectedRequests: []reconcile.Request{},
		},
		{
			name: "a cluster generator should produce a request",
			items: []argoprojiov1alpha1.ApplicationSet{
				{
					ObjectMeta: v1.ObjectMeta{
						Name:      "my-app-set",
						Namespace: "argocd",
					},
					Spec: argoprojiov1alpha1.ApplicationSetSpec{
						Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
							{
								Clusters: &argoprojiov1alpha1.ClusterGenerator{},
							},
						},
					},
				},
			},
			secret: corev1.Secret{
				ObjectMeta: v1.ObjectMeta{
					Namespace: "argocd",
					Name:      "my-secret",
					Labels: map[string]string{
						generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
					},
				},
			},
			expectedRequests: []reconcile.Request{{
				NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
			}},
		},
		{
			name: "multiple cluster generators should produce multiple requests",
			items: []argoprojiov1alpha1.ApplicationSet{
				{
					ObjectMeta: v1.ObjectMeta{
						Name:      "my-app-set",
						Namespace: "argocd",
					},
					Spec: argoprojiov1alpha1.ApplicationSetSpec{
						Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
							{
								Clusters: &argoprojiov1alpha1.ClusterGenerator{},
							},
						},
					},
				},
				{
					ObjectMeta: v1.ObjectMeta{
						Name:      "my-app-set2",
						Namespace: "argocd",
					},
					Spec: argoprojiov1alpha1.ApplicationSetSpec{
						Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
							{
								Clusters: &argoprojiov1alpha1.ClusterGenerator{},
							},
						},
					},
				},
			},
			secret: corev1.Secret{
				ObjectMeta: v1.ObjectMeta{
					Namespace: "argocd",
					Name:      "my-secret",
					Labels: map[string]string{
						generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
					},
				},
			},
			expectedRequests: []reconcile.Request{
				{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
				{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set2"}},
			},
		},
		{
			name: "non-cluster generator should not match",
			items: []argoprojiov1alpha1.ApplicationSet{
				{
					ObjectMeta: v1.ObjectMeta{
						Name:      "my-app-set",
						Namespace: "another-namespace",
					},
					Spec: argoprojiov1alpha1.ApplicationSetSpec{
						Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
							{
								Clusters: &argoprojiov1alpha1.ClusterGenerator{},
							},
						},
					},
				},
				{
					ObjectMeta: v1.ObjectMeta{
						Name:      "app-set-non-cluster",
						Namespace: "argocd",
					},
					Spec: argoprojiov1alpha1.ApplicationSetSpec{
						Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
							{
								List: &argoprojiov1alpha1.ListGenerator{},
							},
						},
					},
				},
			},
			secret: corev1.Secret{
				ObjectMeta: v1.ObjectMeta{
					Namespace: "argocd",
					Name:      "my-secret",
					Labels: map[string]string{
						generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
					},
				},
			},
			expectedRequests: []reconcile.Request{
				{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
			},
		},

		{
			name: "non-argo cd secret should not match",
			items: []argoprojiov1alpha1.ApplicationSet{
				{
					ObjectMeta: v1.ObjectMeta{
						Name:      "my-app-set",
						Namespace: "another-namespace",
					},
					Spec: argoprojiov1alpha1.ApplicationSetSpec{
						Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
							{
								Clusters: &argoprojiov1alpha1.ClusterGenerator{},
							},
						},
					},
				},
			},
			secret: corev1.Secret{
				ObjectMeta: v1.ObjectMeta{
					Namespace: "argocd",
					Name:      "my-non-argocd-secret",
				},
			},
			expectedRequests: []reconcile.Request{},
		},
	}

	for _, test := range tests {

		t.Run(test.name, func(t *testing.T) {

			appSetList := argoprojiov1alpha1.ApplicationSetList{
				Items: test.items,
			}

			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()

			handler := &clusterSecretEventHandler{
				Client: fakeClient,
				Log:    log.WithField("type", "createSecretEventHandler"),
			}

			mockAddRateLimitingInterface := mockAddRateLimitingInterface{}

			handler.queueRelatedAppGenerators(&mockAddRateLimitingInterface, &test.secret)

			assert.False(t, mockAddRateLimitingInterface.errorOccurred)
			assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests)

		})
	}

}

// Add checks the type, and adds it to the internal list of received additions
func (obj *mockAddRateLimitingInterface) Add(item interface{}) {
	if req, ok := item.(ctrl.Request); ok {
		obj.addedItems = append(obj.addedItems, req)
	} else {
		obj.errorOccurred = true
	}
}

type mockAddRateLimitingInterface struct {
	errorOccurred bool
	addedItems    []ctrl.Request
}
19  applicationset/examples/cluster/cluster-example.yaml  Normal file
@@ -0,0 +1,19 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  - clusters: {}
  template:
    metadata:
      name: '{{name}}-guestbook'
    spec:
      project: "default"
      source:
        repoURL: https://github.com/argoproj/argocd-example-apps/
        targetRevision: HEAD
        path: guestbook
      destination:
        server: '{{server}}'
        namespace: guestbook
57  applicationset/examples/clusterDecisionResource/README.md  Normal file
@@ -0,0 +1,57 @@
# How the Cluster Decision Resource (`clusterDecisionResource`) generator works

1. The Cluster Decision Resource generator reads a configurable status format:
```yaml
status:
  clusters:
  - name: cluster-01
  - name: cluster-02
```
This is a common status format. Another format that could be read looks like this:
```yaml
status:
  decisions:
  - clusterName: cluster-01
    namespace: cluster-01
  - clusterName: cluster-02
    namespace: cluster-02
```
2. Any resource that has a list of key/value pairs, where the values match ArgoCD cluster names, can be used.
3. The key/value pairs found in each element of the list are made available to the template. In addition, `name` and `server` are still available to the template.
4. The Service Account used by the ApplicationSet controller must have permission to `Get` the resource you want to read the duck-typed status from.
5. A ConfigMap is used to identify the resource from which to read the status of generated ArgoCD clusters. You can read from multiple resources by creating one ConfigMap for each of them in the ArgoCD namespace.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-configmap
data:
  apiVersion: group.io/v1
  kind: mykinds
  statusListKey: clusters
  matchKey: name
```
* `apiVersion` - The apiVersion of your resource.
* `kind` - The plural kind of your resource.
* `statusListKey` - Defaults to 'clusters'; the key in your resource's status that holds the list of ArgoCD clusters.
* `matchKey` - The key name used inside each element of the cluster list; `name` and `clusterName` are the keys in the examples above.

# Applying the example

1. Connect to a cluster with the ApplicationSet controller running.
2. Edit the Role for the ApplicationSet service account, and grant it permission to `list` the `placementdecisions` resources from the apiGroup `cluster.open-cluster-management.io/v1alpha1`:
```yaml
- apiGroups:
  - "cluster.open-cluster-management.io/v1alpha1"
  resources:
  - placementdecisions
  verbs:
  - list
```
3. Apply the following controller and its associated ManagedCluster CRDs:
https://github.com/open-cluster-management/placement
4. Apply the PlacementDecision, ConfigMap, and ApplicationSet:
```bash
kubectl apply -f ./placementdecision.yaml
kubectl apply -f ./configMap.yaml
kubectl apply -f ./ducktype-example.yaml
```
5. For now this won't do anything until you create a controller that populates the `Status.Decisions` array.
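To make the duck-typing contract above concrete, here is a minimal standalone sketch (not part of this change set, and not the controller's actual implementation) of how cluster names could be pulled out of an already-fetched resource using the `statusListKey` and `matchKey` values from the ConfigMap. It only relies on the `unstructured` helpers from apimachinery:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// clusterNamesFromStatus extracts the values of matchKey from the list found at
// status.<statusListKey>, e.g. status.decisions[].clusterName.
func clusterNamesFromStatus(obj *unstructured.Unstructured, statusListKey, matchKey string) ([]string, error) {
	list, found, err := unstructured.NestedSlice(obj.Object, "status", statusListKey)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("status.%s not found", statusListKey)
	}
	var names []string
	for _, item := range list {
		element, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		if name, ok := element[matchKey].(string); ok {
			names = append(names, name)
		}
	}
	return names, nil
}

func main() {
	// The second status format shown above: status.decisions[].clusterName.
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"status": map[string]interface{}{
			"decisions": []interface{}{
				map[string]interface{}{"clusterName": "cluster-01", "namespace": "cluster-01"},
				map[string]interface{}{"clusterName": "cluster-02", "namespace": "cluster-02"},
			},
		},
	}}
	names, err := clusterNamesFromStatus(obj, "decisions", "clusterName")
	if err != nil {
		panic(err)
	}
	fmt.Println(names) // [cluster-01 cluster-02]
}
```

Each returned name would then be matched against the registered ArgoCD clusters, while the remaining keys of every element stay available as template parameters, as described in point 3 above.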
@@ -0,0 +1,11 @@
# To generate Status.Decisions from this CRD, https://github.com/open-cluster-management/multicloud-operators-placementrule must be deployed
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ocm-placement
data:
  apiVersion: apps.open-cluster-management.io/v1
  kind: placementrules
  statusListKey: decisions
  matchKey: clusterName
@@ -0,0 +1,27 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: book-import
spec:
  generators:
  - clusterDecisionResource:
      configMapRef: ocm-placement
      name: test-placement
      requeueAfterSeconds: 30
  template:
    metadata:
      name: '{{clusterName}}-book-import'
    spec:
      project: "default"
      source:
        repoURL: https://github.com/open-cluster-management/application-samples.git
        targetRevision: HEAD
        path: book-import
      destination:
        name: '{{clusterName}}'
        namespace: bookimport
      syncPolicy:
        automated:
          prune: true
        syncOptions:
        - CreateNamespace=true
@@ -0,0 +1,18 @@
---
apiVersion: apps.open-cluster-management.io/v1
kind: PlacementRule
metadata:
  name: test-placement
spec:
  clusterReplicas: 1  # Availability choice, maximum number of clusters to provision at once
  clusterSelector:
    matchLabels:
      'usage': 'development'
  clusterConditions:
  - type: ManagedClusterConditionAvailable
    status: "True"
# Below is sample output the generator can consume.
status:
  decisions:
  - clusterName: cluster-01
  - clusterName: cluster-02
22  applicationset/examples/design-doc/applicationset.yaml  Normal file
@@ -0,0 +1,22 @@
# This is an example of a typical ApplicationSet which uses the cluster generator.
# An ApplicationSet is comprised of two stanzas:
#  - spec.generator - producer of a list of values supplied as arguments to an app template
#  - spec.template - an application template, which has been parameterized
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  - clusters: {}
  template:
    metadata:
      name: '{{name}}-guestbook'
    spec:
      source:
        repoURL: https://github.com/infra-team/cluster-deployments.git
        targetRevision: HEAD
        chart: guestbook
      destination:
        server: '{{server}}'
        namespace: guestbook
33  applicationset/examples/design-doc/clusters.yaml  Normal file
@@ -0,0 +1,33 @@
# The cluster generator produces an items list from all clusters registered to Argo CD.
# It automatically provides the following fields as values to the app template:
#  - name
#  - server
#  - metadata.labels.<key>
#  - metadata.annotations.<key>
#  - values.<key>
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  - clusters:
      selector:
        matchLabels:
          argocd.argoproj.io/secret-type: cluster
      values:
        project: default
  template:
    metadata:
      name: '{{name}}-guestbook'
      labels:
        environment: '{{metadata.labels.environment}}'
    spec:
      project: '{{values.project}}'
      source:
        repoURL: https://github.com/infra-team/cluster-deployments.git
        targetRevision: HEAD
        chart: guestbook
      destination:
        server: '{{server}}'
        namespace: guestbook
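As a rough illustration of the comment block above, the parameters handed to the template for each registered cluster can be pictured as a flat map built from the cluster secret plus the generator's `values` block. This is only a sketch of the idea (it assumes the usual Argo CD convention of cluster secrets carrying `name` and `server` in their data), not the controller's actual implementation:

```go
package sketch

import corev1 "k8s.io/api/core/v1"

// clusterParams builds the template parameters the comments above describe:
// name, server, metadata.labels.<key>, metadata.annotations.<key> and values.<key>.
func clusterParams(secret *corev1.Secret, values map[string]string) map[string]string {
	params := map[string]string{
		"name":   string(secret.Data["name"]),
		"server": string(secret.Data["server"]),
	}
	for k, v := range secret.ObjectMeta.Labels {
		params["metadata.labels."+k] = v
	}
	for k, v := range secret.ObjectMeta.Annotations {
		params["metadata.annotations."+k] = v
	}
	for k, v := range values {
		params["values."+k] = v
	}
	return params
}
```

With the selector in the example, only secrets labelled `argocd.argoproj.io/secret-type: cluster` would be fed through a mapping like this.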
@@ -0,0 +1,44 @@
|
||||
# This example demonstrates the git directory generator, which produces an items list
|
||||
# based on discovery of directories in a git repo matching a specified pattern.
|
||||
# Git generators automatically provide {{path}} and {{path.basename}} as available
|
||||
# variables to the app template.
|
||||
#
|
||||
# Suppose the following git directory structure (note the use of different config tools):
|
||||
#
|
||||
# cluster-deployments
|
||||
# └── add-ons
|
||||
# ├── argo-rollouts
|
||||
# │ ├── all.yaml
|
||||
# │ └── kustomization.yaml
|
||||
# ├── argo-workflows
|
||||
# │ └── install.yaml
|
||||
# ├── grafana
|
||||
# │ ├── Chart.yaml
|
||||
# │ └── values.yaml
|
||||
# └── prometheus-operator
|
||||
# ├── Chart.yaml
|
||||
# └── values.yaml
|
||||
#
|
||||
# The following ApplicationSet would produce four applications (in different namespaces),
|
||||
# using the directory basename as both the namespace and application name.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
directories:
|
||||
- path: add-ons/*
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: http://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
55  applicationset/examples/design-doc/git-files-discovery.yaml  Normal file
@@ -0,0 +1,55 @@
# This example demonstrates a git file generator which traverses the directory structure of a git
# repository to discover items based on a filename convention. For each file discovered, the
# contents of the discovered files themselves act as the set of inputs to the app template.
#
# Suppose the following git directory structure:
#
# cluster-deployments
# ├── apps
# │   └── guestbook
# │       └── install.yaml
# └── cluster-config
#     ├── engineering
#     │   ├── dev
#     │   │   └── config.json
#     │   └── prod
#     │       └── config.json
#     └── finance
#         ├── dev
#         │   └── config.json
#         └── prod
#             └── config.json
#
# The discovered files (e.g. config.json) can contain any structured data supplied to the
# generated application, e.g.:
# {
#   "aws_account": "123456",
#   "asset_id": "11223344",
#   "cluster": {
#     "owner": "Jesse_Suen@intuit.com",
#     "name": "engineering-dev",
#     "address": "http://1.2.3.4"
#   }
# }
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  - git:
      repoURL: https://github.com/infra-team/cluster-deployments.git
      files:
      - path: "**/config.json"
  template:
    metadata:
      name: '{{cluster.name}}-guestbook'
    spec:
      source:
        repoURL: https://github.com/infra-team/cluster-deployments.git
        targetRevision: HEAD
        path: apps/guestbook
      destination:
        server: '{{cluster.address}}'
        namespace: guestbook
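The `{{cluster.name}}` and `{{cluster.address}}` references above rely on the discovered JSON being flattened into dotted parameter names. A minimal sketch of that flattening step, shown here only to illustrate the idea (the real generator may differ):

```go
package main

import "fmt"

// flatten turns nested JSON such as {"cluster": {"name": "engineering-dev"}}
// into flat template parameters such as "cluster.name" -> "engineering-dev".
func flatten(prefix string, in map[string]interface{}, out map[string]string) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "." + k
		}
		switch val := v.(type) {
		case map[string]interface{}:
			flatten(key, val, out)
		default:
			out[key] = fmt.Sprintf("%v", val)
		}
	}
}

func main() {
	// The config.json sketched in the comments above.
	params := map[string]string{}
	flatten("", map[string]interface{}{
		"aws_account": "123456",
		"asset_id":    "11223344",
		"cluster": map[string]interface{}{
			"name":    "engineering-dev",
			"address": "http://1.2.3.4",
		},
	}, params)
	fmt.Println(params["cluster.name"], params["cluster.address"])
}
```

Running this over the example file yields parameters like `cluster.name=engineering-dev` and `cluster.address=http://1.2.3.4`, which is exactly what the template consumes.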
68  applicationset/examples/design-doc/git-files-literal.yaml  Normal file
@@ -0,0 +1,68 @@
|
||||
# This example demonstrates a git file generator which produces its items based on one or
|
||||
# more files referenced in a git repo. The referenced files would contain a json/yaml list of
|
||||
# arbitrary structured objects. Each item of the list would become a set of parameters to a
|
||||
# generated application.
|
||||
#
|
||||
# Suppose the following git directory structure:
|
||||
#
|
||||
# cluster-deployments
|
||||
# ├── apps
|
||||
# │ └── guestbook
|
||||
# │ ├── v1.0
|
||||
# │ │ └── install.yaml
|
||||
# │ └── v2.0
|
||||
# │ └── install.yaml
|
||||
# └── config
|
||||
# └── clusters.json
|
||||
#
|
||||
# In this example, the `clusters.json` file is json list of structured data:
|
||||
# [
|
||||
# {
|
||||
# "account": "123456",
|
||||
# "asset_id": "11223344",
|
||||
# "cluster": {
|
||||
# "owner": "Jesse_Suen@intuit.com",
|
||||
# "name": "engineering-dev",
|
||||
# "address": "http://1.2.3.4"
|
||||
# },
|
||||
# "appVersions": {
|
||||
# "prometheus-operator": "v0.38",
|
||||
# "guestbook": "v2.0"
|
||||
# }
|
||||
# },
|
||||
# {
|
||||
# "account": "456789",
|
||||
# "asset_id": "55667788",
|
||||
# "cluster": {
|
||||
# "owner": "Alexander_Matyushentsev@intuit.com",
|
||||
# "name": "engineering-prod",
|
||||
# "address": "http://2.4.6.8"
|
||||
# },
|
||||
# "appVersions": {
|
||||
# "prometheus-operator": "v0.38",
|
||||
# "guestbook": "v1.0"
|
||||
# }
|
||||
# }
|
||||
# ]
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: config/clusters.json
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: apps/guestbook/{{appVersions.guestbook}}
|
||||
destination:
|
||||
server: http://kubernetes.default.svc
|
||||
namespace: guestbook
|
||||
33  applicationset/examples/design-doc/list.yaml  Normal file
@@ -0,0 +1,33 @@
|
||||
# The list generator specifies a literal list of argument values to the app spec template.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
values:
|
||||
project: dev
|
||||
- cluster: engineering-prod
|
||||
url: https://2.4.6.8
|
||||
values:
|
||||
project: prod
|
||||
- cluster: finance-preprod
|
||||
url: https://9.8.7.6
|
||||
values:
|
||||
project: preprod
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
3  applicationset/examples/design-doc/proposal/README.md  Normal file
@@ -0,0 +1,3 @@
# Proposal Examples
This directory contains examples that are not yet implemented.
They are part of the project to indicate future work, and we welcome any contribution that adds an implementation.
48  applicationset/examples/design-doc/proposal/filters.yaml  Normal file
@@ -0,0 +1,48 @@
# For all generators, filters can be applied to reduce the generated items to a smaller subset.
# A powerful set of filter expressions is supported using syntax provided by the
# https://github.com/antonmedv/expr library. Example expressions are demonstrated below.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  # Match all clusters that meet ALL of the following conditions:
  # 1. name matches the regex `sales-.*`
  # 2. environment label is either 'staging' or 'prod'
  - clusters:
      filters:
      - expr: '{{name}} matches "sales-.*"'
      - expr: '{{metadata.labels.environment}} in [staging, prod]'
      values:
        version: '2.0.0'
  # Filter items from `config/clusters.json` in the `cluster-deployments` git repo,
  # to only those having the `cluster.enabled == true` property. e.g.:
  # {
  #   ...
  #   "cluster": {
  #     "enabled": true,
  #     ...
  #   }
  # }
  - git:
      repoURL: https://github.com/infra-team/cluster-deployments.git
      files:
      - path: config/clusters.json
      filters:
      - expr: '{{cluster.enabled}} == true'
  template:
    metadata:
      name: '{{name}}-guestbook'
    spec:
      source:
        repoURL: https://github.com/infra-team/cluster-deployments.git
        targetRevision: "{{values.version}}"
        chart: guestbook
        helm:
          parameters:
          - name: foo
            value: "{{metadata.annotations.foo}}"
      destination:
        server: '{{server}}'
        namespace: guestbook
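The filter proposal above leans on the expression syntax of the expr library it links to. A small standalone sketch of how such a filter could be evaluated against one generated item; the `{{...}}` substitution is assumed to have already happened, and this is not the proposal's implementation:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

func main() {
	// Parameters as a generator might produce them for one cluster.
	env := map[string]interface{}{
		"name":        "sales-eu",
		"environment": "prod",
	}

	// Roughly the proposal's two cluster filters combined into one expression.
	program, err := expr.Compile(`name matches "sales-.*" and environment in ["staging", "prod"]`, expr.Env(env))
	if err != nil {
		panic(err)
	}

	keep, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(keep) // true: this item survives the filter
}
```

Items for which the expression evaluates to false would simply be dropped before templating.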
48  applicationset/examples/design-doc/template-override.yaml  Normal file
@@ -0,0 +1,48 @@
|
||||
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
|
||||
# useful to do this in order to override the spec.template stanza, and when simple string
|
||||
# parameterization are insufficient. In the below examples, the generators[].XXX.template is
|
||||
# a partial definition, which overrides/patch the default template.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "project"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
path: '{{cluster}}-override'
|
||||
destination: {}
|
||||
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-prod
|
||||
url: https://1.2.3.4
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "project2"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
path: '{{cluster}}-override2'
|
||||
destination: {}
|
||||
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: "project"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -0,0 +1,6 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -0,0 +1,417 @@
|
||||
# This is an auto-generated file. DO NOT EDIT
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterworkflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: ClusterWorkflowTemplate
|
||||
listKind: ClusterWorkflowTemplateList
|
||||
plural: clusterworkflowtemplates
|
||||
shortNames:
|
||||
- clusterwftmpl
|
||||
- cwft
|
||||
singular: clusterworkflowtemplate
|
||||
scope: Cluster
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: cronworkflows.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: CronWorkflow
|
||||
listKind: CronWorkflowList
|
||||
plural: cronworkflows
|
||||
shortNames:
|
||||
- cwf
|
||||
- cronwf
|
||||
singular: cronworkflow
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workfloweventbindings.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowEventBinding
|
||||
listKind: WorkflowEventBindingList
|
||||
plural: workfloweventbindings
|
||||
shortNames:
|
||||
- wfeb
|
||||
singular: workfloweventbinding
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflows.argoproj.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.phase
|
||||
description: Status of the workflow
|
||||
name: Status
|
||||
type: string
|
||||
- JSONPath: .status.startedAt
|
||||
description: When the workflow was started
|
||||
format: date-time
|
||||
name: Age
|
||||
type: date
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: Workflow
|
||||
listKind: WorkflowList
|
||||
plural: workflows
|
||||
shortNames:
|
||||
- wf
|
||||
singular: workflow
|
||||
scope: Namespaced
|
||||
subresources: {}
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowTemplate
|
||||
listKind: WorkflowTemplateList
|
||||
plural: workflowtemplates
|
||||
shortNames:
|
||||
- wftmpl
|
||||
singular: workflowtemplate
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- create
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflowtemplates
|
||||
- workflowtemplates/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-server-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
- pods/log
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- watch
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workfloweventbindings
|
||||
- workflowtemplates
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-server-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-server-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: workflow-controller-configmap
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 2746
|
||||
targetPort: 2746
|
||||
selector:
|
||||
app: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: workflow-controller-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: 9090
|
||||
selector:
|
||||
app: workflow-controller
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: argo-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: argo-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
- --namespaced
|
||||
image: argoproj/argocli:v2.12.5
|
||||
name: argo-server
|
||||
ports:
|
||||
- containerPort: 2746
|
||||
name: web
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 2746
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 20
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo-server
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: workflow-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: workflow-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: workflow-controller
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --configmap
|
||||
- workflow-controller-configmap
|
||||
- --executor-image
|
||||
- argoproj/argoexec:v2.12.5
|
||||
- --namespaced
|
||||
command:
|
||||
- workflow-controller
|
||||
image: argoproj/workflow-controller:v2.12.5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 30
|
||||
name: workflow-controller
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: metrics
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo
|
||||
@@ -0,0 +1,14 @@
|
||||
apiVersion: v2
|
||||
name: helm-prometheus-operator
|
||||
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -0,0 +1,4 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 9.4.10
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -0,0 +1 @@
|
||||
# Blank values.yaml
|
||||
@@ -0,0 +1,6 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -0,0 +1,417 @@
|
||||
# This is an auto-generated file. DO NOT EDIT
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterworkflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: ClusterWorkflowTemplate
|
||||
listKind: ClusterWorkflowTemplateList
|
||||
plural: clusterworkflowtemplates
|
||||
shortNames:
|
||||
- clusterwftmpl
|
||||
- cwft
|
||||
singular: clusterworkflowtemplate
|
||||
scope: Cluster
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: cronworkflows.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: CronWorkflow
|
||||
listKind: CronWorkflowList
|
||||
plural: cronworkflows
|
||||
shortNames:
|
||||
- cwf
|
||||
- cronwf
|
||||
singular: cronworkflow
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workfloweventbindings.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowEventBinding
|
||||
listKind: WorkflowEventBindingList
|
||||
plural: workfloweventbindings
|
||||
shortNames:
|
||||
- wfeb
|
||||
singular: workfloweventbinding
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflows.argoproj.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.phase
|
||||
description: Status of the workflow
|
||||
name: Status
|
||||
type: string
|
||||
- JSONPath: .status.startedAt
|
||||
description: When the workflow was started
|
||||
format: date-time
|
||||
name: Age
|
||||
type: date
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: Workflow
|
||||
listKind: WorkflowList
|
||||
plural: workflows
|
||||
shortNames:
|
||||
- wf
|
||||
singular: workflow
|
||||
scope: Namespaced
|
||||
subresources: {}
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowTemplate
|
||||
listKind: WorkflowTemplateList
|
||||
plural: workflowtemplates
|
||||
shortNames:
|
||||
- wftmpl
|
||||
singular: workflowtemplate
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- create
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflowtemplates
|
||||
- workflowtemplates/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-server-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
- pods/log
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- watch
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workfloweventbindings
|
||||
- workflowtemplates
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-server-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-server-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: workflow-controller-configmap
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 2746
|
||||
targetPort: 2746
|
||||
selector:
|
||||
app: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: workflow-controller-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: 9090
|
||||
selector:
|
||||
app: workflow-controller
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: argo-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: argo-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
- --namespaced
|
||||
image: argoproj/argocli:v2.12.5
|
||||
name: argo-server
|
||||
ports:
|
||||
- containerPort: 2746
|
||||
name: web
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 2746
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 20
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo-server
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: workflow-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: workflow-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: workflow-controller
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --configmap
|
||||
- workflow-controller-configmap
|
||||
- --executor-image
|
||||
- argoproj/argoexec:v2.12.5
|
||||
- --namespaced
|
||||
command:
|
||||
- workflow-controller
|
||||
image: argoproj/workflow-controller:v2.12.5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 30
|
||||
name: workflow-controller
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: metrics
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo
|
||||
@@ -0,0 +1,23 @@
|
||||
apiVersion: v2
|
||||
name: helm-guestbook
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -0,0 +1,19 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.hosts }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "helm-guestbook.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "helm-guestbook.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "helm-guestbook.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "helm-guestbook.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
@@ -0,0 +1,32 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "helm-guestbook.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "helm-guestbook.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "helm-guestbook.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -0,0 +1,52 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "helm-guestbook.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
chart: {{ template "helm-guestbook.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "helm-guestbook.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
chart: {{ template "helm-guestbook.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -0,0 +1,2 @@
|
||||
service:
|
||||
type: LoadBalancer
|
||||
@@ -0,0 +1,45 @@
|
||||
# Default values for helm-guestbook.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: gcr.io/heptio-images/ks-guestbook-demo
|
||||
tag: 0.1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /
|
||||
hosts:
|
||||
- chart-example.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -0,0 +1 @@
|
||||
name: helm-prometheus-operator
|
||||
@@ -0,0 +1,4 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 9.4.10
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -0,0 +1 @@
|
||||
# Blank values.yaml
|
||||
@@ -0,0 +1,25 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/git-generator-directory/excludes/cluster-addons/*
|
||||
- exclude: true
|
||||
path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -0,0 +1,23 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/git-generator-directory/cluster-addons/*
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -0,0 +1,20 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -0,0 +1,7 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"aws_account": "123456",
|
||||
"asset_id": "11223344",
|
||||
"cluster": {
|
||||
"owner": "cluster-admin@company.com",
|
||||
"name": "engineering-dev",
|
||||
"address": "http://1.2.3.4"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"aws_account": "123456",
|
||||
"asset_id": "11223344",
|
||||
"cluster": {
|
||||
"owner": "cluster-admin@company.com",
|
||||
"name": "engineering-prod",
|
||||
"address": "http://1.2.3.4"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,24 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: "applicationset/examples/git-generator-files-discovery/apps/guestbook"
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
#server: '{{cluster.address}}'
|
||||
namespace: guestbook
|
||||
@@ -0,0 +1,20 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -0,0 +1,20 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
24  applicationset/examples/list-generator/list-example.yaml  Normal file
@@ -0,0 +1,24 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/list-generator/guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -0,0 +1,6 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -0,0 +1,417 @@
|
||||
# This is an auto-generated file. DO NOT EDIT
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterworkflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: ClusterWorkflowTemplate
|
||||
listKind: ClusterWorkflowTemplateList
|
||||
plural: clusterworkflowtemplates
|
||||
shortNames:
|
||||
- clusterwftmpl
|
||||
- cwft
|
||||
singular: clusterworkflowtemplate
|
||||
scope: Cluster
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: cronworkflows.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: CronWorkflow
|
||||
listKind: CronWorkflowList
|
||||
plural: cronworkflows
|
||||
shortNames:
|
||||
- cwf
|
||||
- cronwf
|
||||
singular: cronworkflow
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workfloweventbindings.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowEventBinding
|
||||
listKind: WorkflowEventBindingList
|
||||
plural: workfloweventbindings
|
||||
shortNames:
|
||||
- wfeb
|
||||
singular: workfloweventbinding
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflows.argoproj.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.phase
|
||||
description: Status of the workflow
|
||||
name: Status
|
||||
type: string
|
||||
- JSONPath: .status.startedAt
|
||||
description: When the workflow was started
|
||||
format: date-time
|
||||
name: Age
|
||||
type: date
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: Workflow
|
||||
listKind: WorkflowList
|
||||
plural: workflows
|
||||
shortNames:
|
||||
- wf
|
||||
singular: workflow
|
||||
scope: Namespaced
|
||||
subresources: {}
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowTemplate
|
||||
listKind: WorkflowTemplateList
|
||||
plural: workflowtemplates
|
||||
shortNames:
|
||||
- wftmpl
|
||||
singular: workflowtemplate
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- create
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflowtemplates
|
||||
- workflowtemplates/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-server-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
- pods/log
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- watch
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workfloweventbindings
|
||||
- workflowtemplates
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-server-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-server-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: workflow-controller-configmap
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 2746
|
||||
targetPort: 2746
|
||||
selector:
|
||||
app: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: workflow-controller-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: 9090
|
||||
selector:
|
||||
app: workflow-controller
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: argo-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: argo-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
- --namespaced
|
||||
image: argoproj/argocli:v2.12.5
|
||||
name: argo-server
|
||||
ports:
|
||||
- containerPort: 2746
|
||||
name: web
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 2746
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 20
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo-server
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: workflow-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: workflow-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: workflow-controller
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --configmap
|
||||
- workflow-controller-configmap
|
||||
- --executor-image
|
||||
- argoproj/argoexec:v2.12.5
|
||||
- --namespaced
|
||||
command:
|
||||
- workflow-controller
|
||||
image: argoproj/workflow-controller:v2.12.5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 30
|
||||
name: workflow-controller
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: metrics
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo
|
||||
@@ -0,0 +1,14 @@
apiVersion: v2
name: helm-prometheus-operator

type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
@@ -0,0 +1,4 @@
dependencies:
- name: kube-prometheus-stack
  version: 9.4.10
  repository: https://prometheus-community.github.io/helm-charts
@@ -0,0 +1 @@
# Blank values.yaml
applicationset/examples/matrix/cluster-and-git.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
# This example demonstrates combining the git generator with a cluster generator.
# The expected output is one Application per git directory per cluster (application_count = git directories * clusters).
#
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: cluster-git
spec:
  generators:
  - matrix:
      generators:
      - git:
          repoURL: https://github.com/argoproj/argo-cd.git
          revision: HEAD
          directories:
          - path: applicationset/examples/matrix/cluster-addons/*
      - clusters:
          selector:
            matchLabels:
              argocd.argoproj.io/secret-type: cluster
  template:
    metadata:
      name: '{{path.basename}}-{{name}}'
    spec:
      project: '{{metadata.labels.environment}}'
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: '{{server}}'
        namespace: '{{path.basename}}'
applicationset/examples/matrix/list-and-git.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
# This example demonstrates combining the git generator with a list generator.
# The expected output is one Application per git directory per list entry (application_count = git directories * list entries).
#
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: list-git
spec:
  generators:
  - matrix:
      generators:
      - git:
          repoURL: https://github.com/argoproj/argo-cd.git
          revision: HEAD
          directories:
          - path: applicationset/examples/matrix/cluster-addons/*
      - list:
          elements:
          - cluster: engineering-dev
            url: https://1.2.3.4
            values:
              project: dev
          - cluster: engineering-prod
            url: https://2.4.6.8
            values:
              project: prod
  template:
    metadata:
      name: '{{path.basename}}-{{cluster}}'
    spec:
      project: '{{values.project}}'
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: '{{url}}'
        namespace: '{{path.basename}}'
applicationset/examples/matrix/list-and-list.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: list-and-list
  namespace: argocd
spec:
  generators:
  - matrix:
      generators:
      - list:
          elements:
          - cluster: engineering-dev
            url: https://kubernetes.default.svc
            values:
              project: default
          - cluster: engineering-prod
            url: https://kubernetes.default.svc
            values:
              project: default
      - list:
          elements:
          - values:
              suffix: '1'
          - values:
              suffix: '2'
  template:
    metadata:
      name: '{{cluster}}-{{values.suffix}}'
    spec:
      project: '{{values.project}}'
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: '{{url}}'
        namespace: '{{path.basename}}'
@@ -0,0 +1,67 @@
# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union
# generators cannot contain further-nested matrix or union generators.
#
# The generators are evaluated from most-nested to least-nested. In this case:
# 1. The union generator joins two lists to make 3 parameter sets.
# 2. The inner matrix generator takes the Cartesian product of the two lists to make 4 parameter sets.
# 3. The outer matrix generator takes the Cartesian product of the 3 union and the 4 inner matrix parameter sets to
#    make 3*4=12 final parameter sets.
# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: matrix-and-union-in-matrix
spec:
  generators:
  - matrix:
      generators:
      - union:
          mergeKeys:
          - cluster
          generators:
          - list:
              elements:
              - cluster: engineering-dev
                url: https://kubernetes.default.svc
                values:
                  project: default
              - cluster: engineering-prod
                url: https://kubernetes.default.svc
                values:
                  project: default
          - list:
              elements:
              - cluster: engineering-dev
                url: https://kubernetes.default.svc
                values:
                  project: default
              - cluster: engineering-test
                url: https://kubernetes.default.svc
                values:
                  project: default
      - matrix:
          generators:
          - list:
              elements:
              - values:
                  suffix: '1'
              - values:
                  suffix: '2'
          - list:
              elements:
              - values:
                  prefix: 'first'
              - values:
                  prefix: 'second'
  template:
    metadata:
      name: '{{values.prefix}}-{{cluster}}-{{values.suffix}}'
    spec:
      project: '{{values.project}}'
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: '{{url}}'
        namespace: '{{path.basename}}'
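Worked through with the element values above, the arithmetic in the comments plays out roughly as follows (a sketch, not literal generator output):

# union on 'cluster':  engineering-dev, engineering-prod, engineering-test         -> 3 parameter sets
# inner matrix:        (suffix '1' | '2') x (prefix 'first' | 'second')            -> 4 parameter sets
# outer matrix:        3 x 4 = 12 parameter sets, rendered through
#                      '{{values.prefix}}-{{cluster}}-{{values.suffix}}', e.g.
#                      first-engineering-dev-1, first-engineering-dev-2, ... second-engineering-test-2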
applicationset/examples/merge/merge-clusters-and-list.yaml (new file, 44 lines)
@@ -0,0 +1,44 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: merge-clusters-and-list
spec:
  generators:
  - merge:
      mergeKeys:
      - server
      generators:
      - clusters:
          values:
            kafka: 'true'
            redis: 'false'
      # For clusters with a specific label, disable Kafka.
      - clusters:
          selector:
            matchLabels:
              use-kafka: 'false'
          values:
            kafka: 'false'
      # For a specific cluster, enable Redis.
      - list:
          elements:
          - server: https://some-specific-cluster
            values.redis: 'true'
  template:
    metadata:
      name: '{{name}}'
    spec:
      project: default
      source:
        repoURL: https://github.com/argoproj/argocd-example-apps/
        targetRevision: HEAD
        path: helm-guestbook
        helm:
          parameters:
          - name: kafka
            value: '{{values.kafka}}'
          - name: redis
            value: '{{values.redis}}'
      destination:
        server: '{{server}}'
        namespace: default
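A sketch of how the merge above resolves for a hypothetical cluster secret whose server is https://some-specific-cluster and which carries the label use-kafka: 'false' (the cluster itself is an assumption for illustration):

# base clusters generator:           values.kafka: 'true', values.redis: 'false'
# clusters with use-kafka: 'false':  values.kafka: 'false'     (merged by the 'server' key)
# list element:                      values.redis: 'true'      (merged by the 'server' key)
# resulting Helm parameters:         kafka='false', redis='true'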
applicationset/examples/merge/merge-two-matrixes.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: merge-two-matrixes
spec:
  generators:
  - merge:
      mergeKeys:
      - server
      - environment
      generators:
      - matrix:
          generators:
          - clusters:
              values:
                replicaCount: '2'
          - list:
              elements:
              - environment: staging
                namespace: guestbook-non-prod
              - environment: prod
                namespace: guestbook
      - list:
          elements:
          - server: https://kubernetes.default.svc
            environment: staging
            values.replicaCount: '1'
  template:
    metadata:
      name: '{{name}}-guestbook-{{environment}}'
    spec:
      project: default
      source:
        repoURL: https://github.com/argoproj/argocd-example-apps/
        targetRevision: HEAD
        path: helm-guestbook
        helm:
          parameters:
          - name: replicaCount
            value: '{{values.replicaCount}}'
      destination:
        server: '{{server}}'
        namespace: '{{namespace}}'
@@ -0,0 +1,40 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: myapp
spec:
  generators:
  - pullRequest:
      github:
        # The GitHub organization or user.
        owner: myorg
        # The GitHub repository.
        repo: myrepo
        # For GitHub Enterprise. (optional)
        api: https://git.example.com/
        # Reference to a Secret containing an access token. (optional)
        tokenRef:
          secretName: github-token
          key: token
        # Labels are used to filter the PRs that you want to target. (optional)
        labels:
        - preview
  template:
    metadata:
      name: 'myapp-{{ branch }}-{{ number }}'
    spec:
      source:
        repoURL: 'https://github.com/myorg/myrepo.git'
        targetRevision: '{{ head_sha }}'
        path: helm-guestbook
        helm:
          parameters:
          - name: "image.tag"
            value: "pull-{{ head_sha }}"
      project: default
      destination:
        server: https://kubernetes.default.svc
        namespace: "{{ branch }}-{{ number }}"
      syncPolicy:
        syncOptions:
        - CreateNamespace=true
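For a hypothetical pull request, say number 42 on branch feature-x (both values are assumptions for illustration), the template above would produce roughly:

# metadata.name:           myapp-feature-x-42
# source.targetRevision:   the PR's head_sha
# helm image.tag:          pull-<head_sha>
# destination.namespace:   feature-x-42   (created on sync via CreateNamespace=true)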
@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  - scmProvider:
      github:
        organization: argoproj
      cloneProtocol: https
      filters:
      - repositoryMatch: example-apps
  template:
    metadata:
      name: '{{ repository }}-guestbook'
    spec:
      project: "default"
      source:
        repoURL: '{{ url }}'
        targetRevision: '{{ branch }}'
        path: guestbook
      destination:
        server: https://kubernetes.default.svc
        namespace: guestbook
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: guestbook-ui
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: guestbook-ui
  template:
    metadata:
      labels:
        app: guestbook-ui
    spec:
      containers:
      - image: gcr.io/heptio-images/ks-guestbook-demo:0.2
        name: guestbook-ui
        ports:
        - containerPort: 80
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
  name: guestbook-ui
spec:
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: guestbook-ui
@@ -0,0 +1,7 @@
namePrefix: kustomize-

resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: guestbook-ui
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: guestbook-ui
  template:
    metadata:
      labels:
        app: guestbook-ui
    spec:
      containers:
      - image: gcr.io/heptio-images/ks-guestbook-demo:0.2
        name: guestbook-ui
        ports:
        - containerPort: 80
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
  name: guestbook-ui
spec:
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: guestbook-ui
@@ -0,0 +1,7 @@
namePrefix: kustomize-

resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
@@ -0,0 +1,36 @@
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
# useful to do this in order to override the spec.template stanza, for example when simple string
# parameterization is insufficient. In the example below, the generators[].XXX.template is
# a partial definition, which overrides/patches the default template.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
  - list:
      elements:
      - cluster: engineering-dev
        url: https://kubernetes.default.svc
      template:
        metadata: {}
        spec:
          project: "default"
          source:
            targetRevision: HEAD
            repoURL: https://github.com/argoproj/argo-cd.git
            path: 'applicationset/examples/template-override/{{cluster}}-override'
          destination: {}

  template:
    metadata:
      name: '{{cluster}}-guestbook'
    spec:
      project: "default"
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: applicationset/examples/template-override/default
      destination:
        server: '{{url}}'
        namespace: guestbook
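Assuming the generator-level template overrides the default as described in the comments above (non-empty fields of the generator template win, the rest is filled in from spec.template), a sketch of the effective Application for the engineering-dev element, where only source.path differs from the default:

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: engineering-dev-guestbook
spec:
  project: default
  source:
    repoURL: https://github.com/argoproj/argo-cd.git
    targetRevision: HEAD
    path: applicationset/examples/template-override/engineering-dev-override
  destination:
    server: https://kubernetes.default.svc
    namespace: guestbook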
applicationset/generators/cluster.go (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
|
||||
ArgoCDSecretTypeCluster = "cluster"
|
||||
)
|
||||
|
||||
var _ Generator = (*ClusterGenerator)(nil)
|
||||
|
||||
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type ClusterGenerator struct {
|
||||
client.Client
|
||||
ctx context.Context
|
||||
clientset kubernetes.Interface
|
||||
// namespace is the Argo CD namespace
|
||||
namespace string
|
||||
settingsManager *settings.SettingsManager
|
||||
}
|
||||
|
||||
func NewClusterGenerator(c client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator {
|
||||
|
||||
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
|
||||
|
||||
g := &ClusterGenerator{
|
||||
Client: c,
|
||||
ctx: ctx,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
settingsManager: settingsManager,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) *argoappsetv1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Clusters.Template
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GenerateParams(
|
||||
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, _ *argoappsetv1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.Clusters == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// Do not include the local cluster in the cluster parameters IF there is a non-empty selector
|
||||
// - Since local clusters do not have secrets, they do not have labels to match against
|
||||
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
clusterSecrets, err := g.getSecretsByClusterName(appSetGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
|
||||
for _, cluster := range clustersFromArgoCD.Items {
|
||||
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
|
||||
secretsFound = append(secretsFound, secretForCluster)
|
||||
|
||||
} else if !ignoreLocalClusters {
|
||||
// If there is no secret for the cluster, it's the local cluster, so handle it here.
|
||||
params := map[string]string{}
|
||||
params["name"] = cluster.Name
|
||||
params["server"] = cluster.Server
|
||||
|
||||
for key, value := range appSetGenerator.Clusters.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
|
||||
log.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
params := map[string]string{}
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = sanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
for key, value := range cluster.ObjectMeta.Annotations {
|
||||
params[fmt.Sprintf("metadata.annotations.%s", key)] = value
|
||||
}
|
||||
for key, value := range cluster.ObjectMeta.Labels {
|
||||
params[fmt.Sprintf("metadata.labels.%s", key)] = value
|
||||
}
|
||||
for key, value := range appSetGenerator.Clusters.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
log.WithField("cluster", cluster.Name).Info("matched cluster secret")
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
|
||||
// List all Clusters:
|
||||
clusterSecretList := &corev1.SecretList{}
|
||||
|
||||
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
|
||||
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := g.Client.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debug("clusters matching labels", "count", len(clusterSecretList.Items))
|
||||
|
||||
res := map[string]corev1.Secret{}
|
||||
|
||||
for _, cluster := range clusterSecretList.Items {
|
||||
clusterName := string(cluster.Data["name"])
|
||||
|
||||
res[clusterName] = cluster
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
// sanitize the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func sanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
|
||||
applicationset/generators/cluster_test.go (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"testing"
|
||||
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type possiblyErroringFakeCtrlRuntimeClient struct {
|
||||
client.Client
|
||||
shouldError bool
|
||||
}
|
||||
|
||||
func (p *possiblyErroringFakeCtrlRuntimeClient) List(ctx context.Context, secretList client.ObjectList, opts ...client.ListOption) error {
|
||||
if p.shouldError {
|
||||
return fmt.Errorf("could not list Secrets")
|
||||
}
|
||||
return p.Client.List(ctx, secretList, opts...)
|
||||
}
|
||||
|
||||
func TestGenerateParams(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "staging-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "staging",
|
||||
"org": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "staging",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("staging-01"),
|
||||
"server": []byte("https://staging-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "production-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "production",
|
||||
"org": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "production",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("production_01/west"),
|
||||
"server": []byte("https://production-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
selector metav1.LabelSelector
|
||||
values map[string]string
|
||||
expected []map[string]string
|
||||
// clientError is true if a k8s client error should be simulated
|
||||
clientError bool
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "no label selector",
|
||||
selector: metav1.LabelSelector{},
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
|
||||
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
|
||||
{"name": "in-cluster", "server": "https://kubernetes.default.svc"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "secret type label selector",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
},
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
|
||||
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production or staging",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"production",
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production or staging with match labels",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"production",
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
MatchLabels: map[string]string{
|
||||
"org": "foo",
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"name": "baz",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "simulate client error",
|
||||
selector: metav1.LabelSelector{},
|
||||
values: nil,
|
||||
expected: nil,
|
||||
clientError: true,
|
||||
expectedError: fmt.Errorf("could not list Secrets"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace")
|
||||
|
||||
got, err := clusterGenerator.GenerateParams(&argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{
|
||||
Selector: testCase.selector,
|
||||
Values: testCase.values,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeClusterName(t *testing.T) {
|
||||
t.Run("valid DNS-1123 subdomain name", func(t *testing.T) {
|
||||
assert.Equal(t, "cluster-name", sanitizeName("cluster-name"))
|
||||
})
|
||||
t.Run("invalid DNS-1123 subdomain name", func(t *testing.T) {
|
||||
invalidName := "-.--CLUSTER/name -./.-"
|
||||
assert.Equal(t, "cluster-name", sanitizeName(invalidName))
|
||||
})
|
||||
}
|
||||
applicationset/generators/duck_type.go (new file, 229 lines)
@@ -0,0 +1,229 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*DuckTypeGenerator)(nil)
|
||||
|
||||
// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type DuckTypeGenerator struct {
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
settingsManager *settings.SettingsManager
|
||||
}
|
||||
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
|
||||
|
||||
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
|
||||
|
||||
g := &DuckTypeGenerator{
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
settingsManager: settingsManager,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
|
||||
// Return a requeue default of 3 minutes, if no override is specified.
|
||||
|
||||
if appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.ClusterDecisionResource.Template
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// Not likely to happen
|
||||
if appSetGenerator.ClusterDecisionResource == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Read the configMapRef
|
||||
cm, err := g.clientset.CoreV1().ConfigMaps(g.namespace).Get(g.ctx, appSetGenerator.ClusterDecisionResource.ConfigMapRef, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Extract GVK data for the dynamic client to use
|
||||
versionIdx := strings.Index(cm.Data["apiVersion"], "/")
|
||||
kind := cm.Data["kind"]
|
||||
resourceName := appSetGenerator.ClusterDecisionResource.Name
|
||||
labelSelector := appSetGenerator.ClusterDecisionResource.LabelSelector
|
||||
|
||||
log.WithField("kind.apiVersion", kind+"."+cm.Data["apiVersion"]).Info("Kind.Group/Version Reference")
|
||||
|
||||
// Validate the fields
|
||||
if kind == "" || versionIdx < 1 {
|
||||
log.Warningf("kind=%v, resourceName=%v, versionIdx=%v", kind, resourceName, versionIdx)
|
||||
return nil, fmt.Errorf("There is a problem with the apiVersion, kind or resourceName provided")
|
||||
}
|
||||
|
||||
if (resourceName == "" && labelSelector.MatchLabels == nil && labelSelector.MatchExpressions == nil) ||
|
||||
(resourceName != "" && (labelSelector.MatchExpressions != nil || labelSelector.MatchLabels != nil)) {
|
||||
|
||||
log.Warningf("You must choose either resourceName=%v, labelSelector.matchLabels=%v or labelSelect.matchExpressions=%v", resourceName, labelSelector.MatchLabels, labelSelector.MatchExpressions)
|
||||
return nil, fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator")
|
||||
}
|
||||
|
||||
// Split up the apiVersion
|
||||
group := cm.Data["apiVersion"][0:versionIdx]
|
||||
version := cm.Data["apiVersion"][versionIdx+1:]
|
||||
log.WithField("kind.group.version", kind+"."+group+"/"+version).Debug("decoded Ref")
|
||||
|
||||
duckGVR := schema.GroupVersionResource{Group: group, Version: version, Resource: kind}
|
||||
|
||||
listOptions := metav1.ListOptions{}
|
||||
if resourceName == "" {
|
||||
listOptions.LabelSelector = metav1.FormatLabelSelector(&labelSelector)
|
||||
log.WithField("listOptions.LabelSelector", listOptions.LabelSelector).Info("selection type")
|
||||
} else {
|
||||
listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", resourceName).String()
|
||||
//metav1.Convert_fields_Selector_To_string(fields.).Sprintf("metadata.name=%s", resourceName)
|
||||
log.WithField("listOptions.FieldSelector", listOptions.FieldSelector).Info("selection type")
|
||||
}
|
||||
|
||||
duckResources, err := g.dynClient.Resource(duckGVR).Namespace(g.namespace).List(g.ctx, listOptions)
|
||||
|
||||
if err != nil {
|
||||
log.WithField("GVK", duckGVR).Warning("resources were not found")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(duckResources.Items) == 0 {
|
||||
log.Warning("no resource found, make sure you clusterDecisionResource is defined correctly")
|
||||
return nil, fmt.Errorf("no clusterDecisionResources found")
|
||||
}
|
||||
|
||||
// Override the duck type in the status of the resource
|
||||
statusListKey := "clusters"
|
||||
|
||||
matchKey := cm.Data["matchKey"]
|
||||
|
||||
if cm.Data["statusListKey"] != "" {
|
||||
statusListKey = cm.Data["statusListKey"]
|
||||
}
|
||||
if matchKey == "" {
|
||||
log.WithField("matchKey", matchKey).Warning("matchKey not found in " + cm.Name)
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
clusterDecisions := []interface{}{}
|
||||
|
||||
// Build the decision slice
|
||||
for _, duckResource := range duckResources.Items {
|
||||
log.WithField("duckResourceName", duckResource.GetName()).Debug("found resource")
|
||||
|
||||
if duckResource.Object["status"] == nil || len(duckResource.Object["status"].(map[string]interface{})) == 0 {
|
||||
log.Warningf("clusterDecisionResource: %s, has no status", duckResource.GetName())
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("duckResourceStatus", duckResource.Object["status"]).Debug("found resource")
|
||||
|
||||
clusterDecisions = append(clusterDecisions, duckResource.Object["status"].(map[string]interface{})[statusListKey].([]interface{})...)
|
||||
|
||||
}
|
||||
log.Infof("Number of decisions found: %v", len(clusterDecisions))
|
||||
|
||||
// Read this outside the loop to improve performance
|
||||
argoClusters := clustersFromArgoCD.Items
|
||||
|
||||
if len(clusterDecisions) > 0 {
|
||||
for _, cluster := range clusterDecisions {
|
||||
|
||||
// generated instance of cluster params
|
||||
params := map[string]string{}
|
||||
|
||||
log.Infof("cluster: %v", cluster)
|
||||
matchValue := cluster.(map[string]interface{})[matchKey]
|
||||
if matchValue == nil || matchValue.(string) == "" {
|
||||
log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]interface{}))
|
||||
continue
|
||||
}
|
||||
|
||||
strMatchValue := matchValue.(string)
|
||||
log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD")
|
||||
|
||||
found := false
|
||||
|
||||
for _, argoCluster := range argoClusters {
|
||||
if argoCluster.Name == strMatchValue {
|
||||
|
||||
log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD")
|
||||
params["name"] = argoCluster.Name
|
||||
params["server"] = argoCluster.Server
|
||||
|
||||
found = true
|
||||
break // Stop looking
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if !found {
|
||||
log.WithField(matchKey, strMatchValue).Warning("unmatched cluster in ArgoCD")
|
||||
continue
|
||||
}
|
||||
|
||||
for key, value := range cluster.(map[string]interface{}) {
|
||||
params[key] = value.(string)
|
||||
}
|
||||
|
||||
for key, value := range appSetGenerator.ClusterDecisionResource.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
} else {
|
||||
log.Warningf("clusterDecisionResource status." + statusListKey + " missing")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
applicationset/generators/duck_type_test.go (new file, 315 lines)
@@ -0,0 +1,315 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
const resourceApiVersion = "mallard.io/v1"
|
||||
const resourceKind = "ducks"
|
||||
const resourceName = "quak"
|
||||
|
||||
func TestGenerateParamsForDuckType(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "staging-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "staging",
|
||||
"org": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "staging",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("staging-01"),
|
||||
"server": []byte("https://staging-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "production-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "production",
|
||||
"org": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "production",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("production-01"),
|
||||
"server": []byte("https://production-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
|
||||
duckType := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "all-species"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "staging-01",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
duckTypeProdOnly := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "spotted"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
duckTypeEmpty := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "canvasback"},
|
||||
},
|
||||
"status": map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
configMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-configmap",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": resourceKind,
|
||||
"statusListKey": "decisions",
|
||||
"matchKey": "clusterName",
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
configMapRef string
|
||||
resourceName string
|
||||
labelSelector metav1.LabelSelector
|
||||
resource *unstructured.Unstructured
|
||||
values map[string]string
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "no duck resource",
|
||||
resourceName: "",
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
|
||||
},
|
||||
/*** This does not work with the FAKE runtime client, fieldSelectors are broken.
|
||||
{
|
||||
name: "invalid name for duck resource",
|
||||
resourceName: resourceName + "-different",
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("duck.mallard.io \"quak\" not found"),
|
||||
},
|
||||
***/
|
||||
{
|
||||
name: "duck type generator resourceName",
|
||||
resourceName: resourceName,
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only",
|
||||
resourceName: resourceName,
|
||||
resource: duckTypeProdOnly,
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type empty status",
|
||||
resourceName: resourceName,
|
||||
resource: duckTypeEmpty,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type empty status labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "canvasback"}},
|
||||
resource: duckTypeEmpty,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "all-species"}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
resource: duckTypeProdOnly,
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "spotted"}},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator labelSelector.matchExpressions",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "duck",
|
||||
Operator: "In",
|
||||
Values: []string{"all-species", "marbled"},
|
||||
},
|
||||
}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator resourceName and labelSelector.matchExpressions",
|
||||
resourceName: resourceName,
|
||||
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "duck",
|
||||
Operator: "In",
|
||||
Values: []string{"all-species", "marbled"},
|
||||
},
|
||||
}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(append(runtimeClusters, configMap)...)
|
||||
|
||||
gvrToListKind := map[schema.GroupVersionResource]string{{
|
||||
Group: "mallard.io",
|
||||
Version: "v1",
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
var duckTypeGenerator = NewDuckTypeGenerator(context.Background(), fakeDynClient, appClientset, "namespace")
|
||||
|
||||
got, err := duckTypeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
ClusterDecisionResource: &argoprojiov1alpha1.DuckTypeGenerator{
|
||||
ConfigMapRef: "my-configmap",
|
||||
Name: testCase.resourceName,
|
||||
LabelSelector: testCase.labelSelector,
|
||||
Values: testCase.values,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
83 applicationset/generators/generator_spec_processor.go Normal file
@@ -0,0 +1,83 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
type TransformResult struct {
|
||||
Params []map[string]string
|
||||
Template argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
// Transform converts a spec generator into a list of paramSets and a merged template
|
||||
func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet) ([]TransformResult, error) {
|
||||
res := []TransformResult{}
|
||||
var firstError error
|
||||
|
||||
generators := GetRelevantGenerators(&requestedGenerator, allGenerators)
|
||||
for _, g := range generators {
|
||||
// Call mergeGeneratorTemplate first: GenerateParams may be more costly, so we want to fail fast if the template merge errors
|
||||
mergedTemplate, err := mergeGeneratorTemplate(g, &requestedGenerator, baseTemplate)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Error("error generating params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
params, err := g.GenerateParams(&requestedGenerator, appSet)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Error("error generating params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
res = append(res, TransformResult{
|
||||
Params: params,
|
||||
Template: mergedTemplate,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
return res, firstError
|
||||
|
||||
}
|
||||
|
||||
func GetRelevantGenerators(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, generators map[string]Generator) []Generator {
|
||||
var res []Generator
|
||||
|
||||
v := reflect.Indirect(reflect.ValueOf(requestedGenerator))
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if !field.CanInterface() {
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.ValueOf(field.Interface()).IsNil() {
|
||||
res = append(res, generators[v.Type().Field(i).Name])
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) (argoprojiov1alpha1.ApplicationSetTemplate, error) {
|
||||
|
||||
// Make a copy of the value from `GetTemplate()` before merge, rather than copying directly into
|
||||
// the provided parameter (which will touch the original resource object returned by client-go)
|
||||
dest := g.GetTemplate(requestedGenerator).DeepCopy()
|
||||
|
||||
err := mergo.Merge(dest, applicationSetTemplate)
|
||||
|
||||
return *dest, err
|
||||
}
|
||||
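As a rough illustration of how Transform is meant to be consumed (this sketch is not part of the diff; the appSet variable is assumed to be a *argoprojiov1alpha1.ApplicationSet already loaded by the controller, and repoService is a hypothetical services.Repos implementation):

	// Map keys must match the field names on ApplicationSetGenerator, because
	// GetRelevantGenerators resolves the relevant generators by reflected field name.
	supportedGenerators := map[string]Generator{
		"List": NewListGenerator(),
		// "Git": NewGitGenerator(repoService),
	}

	results, err := Transform(appSet.Spec.Generators[0], supportedGenerators, appSet.Spec.Template, appSet)
	if err != nil {
		return err // the first error reported by any child generator is surfaced here
	}

	for _, result := range results {
		// result.Params feeds template rendering; result.Template is the base template
		// merged with the generator's inline template via mergo.Merge.
		_ = result
	}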
225 applicationset/generators/git.go Normal file
@@ -0,0 +1,225 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jeremywohl/flatten"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*GitGenerator)(nil)
|
||||
|
||||
type GitGenerator struct {
|
||||
repos services.Repos
|
||||
}
|
||||
|
||||
func NewGitGenerator(repos services.Repos) Generator {
|
||||
g := &GitGenerator{
|
||||
repos: repos,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Git.Template
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
|
||||
// Return the configured requeue interval, or a default of 3 minutes if none is specified.
|
||||
|
||||
if appSetGenerator.Git.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.Git.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.Git == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
var err error
|
||||
var res []map[string]string
|
||||
if appSetGenerator.Git.Directories != nil {
|
||||
res, err = g.generateParamsForGitDirectories(appSetGenerator)
|
||||
} else if appSetGenerator.Git.Files != nil {
|
||||
res, err = g.generateParamsForGitFiles(appSetGenerator)
|
||||
} else {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) ([]map[string]string, error) {
|
||||
|
||||
// Directories, not files
|
||||
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"allPaths": allPaths,
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
}).Info("applications result from the repo service")
|
||||
|
||||
requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths)
|
||||
|
||||
res := g.generateParamsFromApps(requestedApps, appSetGenerator)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) ([]map[string]string, error) {
|
||||
|
||||
// Get all files that match the requested path string, removing duplicates
|
||||
allFiles := make(map[string][]byte)
|
||||
for _, requestedPath := range appSetGenerator.Git.Files {
|
||||
files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for filePath, content := range files {
|
||||
allFiles[filePath] = content
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the unduplicated map into a list, and sort by path to ensure a deterministic
|
||||
// processing order in the subsequent step
|
||||
allPaths := []string{}
|
||||
for path := range allFiles {
|
||||
allPaths = append(allPaths, path)
|
||||
}
|
||||
sort.Strings(allPaths)
|
||||
|
||||
// Generate params from each path, and return
|
||||
res := []map[string]string{}
|
||||
for _, path := range allPaths {
|
||||
|
||||
// A JSON/YAML file can contain multiple sets of parameters (i.e. it is an array)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
|
||||
}
|
||||
|
||||
for index := range paramsArray {
|
||||
res = append(res, paramsArray[index])
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte) ([]map[string]string, error) {
|
||||
objectsFound := []map[string]interface{}{}
|
||||
|
||||
// First, we attempt to parse as an array
|
||||
err := yaml.Unmarshal(fileContent, &objectsFound)
|
||||
if err != nil {
|
||||
// If unable to parse as an array, attempt to parse as a single object
|
||||
singleObj := make(map[string]interface{})
|
||||
err = yaml.Unmarshal(fileContent, &singleObj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse file: %v", err)
|
||||
}
|
||||
objectsFound = append(objectsFound, singleObj)
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
// Flatten all objects found, and return them
|
||||
for _, objectFound := range objectsFound {
|
||||
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params := map[string]string{}
|
||||
for k, v := range flat {
|
||||
params[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
params["path"] = path.Dir(filePath)
|
||||
params["path.basename"] = path.Base(params["path"])
|
||||
params["path.basenameNormalized"] = sanitizeName(path.Base(params["path"]))
|
||||
for k, v := range strings.Split(strings.TrimSuffix(params["path"], params["path.basename"]), "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string {
|
||||
res := []string{}
|
||||
for _, appPath := range allPaths {
|
||||
appInclude := false
|
||||
appExclude := false
|
||||
// For each appPath, check whether any requestedPath in the directories list matches it
|
||||
for _, requestedPath := range Directories {
|
||||
match, err := path.Match(requestedPath.Path, appPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedPath", requestedPath).
|
||||
WithField("appPath", appPath).Error("error while matching appPath to requestedPath")
|
||||
continue
|
||||
}
|
||||
if match && !requestedPath.Exclude {
|
||||
appInclude = true
|
||||
}
|
||||
if match && requestedPath.Exclude {
|
||||
appExclude = true
|
||||
}
|
||||
}
|
||||
// A path matched by any entry with exclude: true is never included, even if another path pattern includes it
|
||||
if appInclude && !appExclude {
|
||||
res = append(res, appPath)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argoprojiov1alpha1.ApplicationSetGenerator) []map[string]string {
|
||||
// TODO: At some point, the applicationSetGenerator param should be used
|
||||
|
||||
res := make([]map[string]string, len(requestedApps))
|
||||
for i, a := range requestedApps {
|
||||
|
||||
params := make(map[string]string, 2)
|
||||
params["path"] = a
|
||||
params["path.basename"] = path.Base(a)
|
||||
params["path.basenameNormalized"] = sanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(strings.TrimSuffix(params["path"], params["path.basename"]), "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
res[i] = params
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
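To make the file-based flow above concrete, the following standalone sketch (not part of the diff; the file path and contents are hypothetical, and it assumes the github.com/jeremywohl/flatten and sigs.k8s.io/yaml modules already imported by git.go) shows how a parsed object is flattened into dotted parameter keys and combined with path-derived params:

	package main

	import (
		"fmt"
		"path"

		"github.com/jeremywohl/flatten"
		"sigs.k8s.io/yaml"
	)

	func main() {
		filePath := "cluster-config/production/config.json"
		content := []byte(`{"cluster": {"name": "production", "address": "https://kubernetes.default.svc"}}`)

		// Parse as a single object; generateParamsFromGitFile above first tries an array form.
		obj := map[string]interface{}{}
		if err := yaml.Unmarshal(content, &obj); err != nil {
			panic(err)
		}

		// Flatten nested keys into dotted form, e.g. "cluster.name".
		flat, err := flatten.Flatten(obj, "", flatten.DotStyle)
		if err != nil {
			panic(err)
		}

		params := map[string]string{}
		for k, v := range flat {
			params[k] = fmt.Sprintf("%v", v)
		}

		// Path-derived params, mirroring the logic above.
		params["path"] = path.Dir(filePath)                 // "cluster-config/production"
		params["path.basename"] = path.Base(params["path"]) // "production"

		fmt.Println(params)
	}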
452 applicationset/generators/git_test.go Normal file
@@ -0,0 +1,452 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// type clientSet struct {
|
||||
// RepoServerServiceClient apiclient.RepoServerServiceClient
|
||||
// }
|
||||
|
||||
// func (c *clientSet) NewRepoServerClient() (io.Closer, apiclient.RepoServerServiceClient, error) {
|
||||
// return io.NewCloser(func() error { return nil }), c.RepoServerServiceClient, nil
|
||||
// }
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "app_3", "path.basename": "app_3", "path.basenameNormalized": "app-3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"p1/app2",
|
||||
"p1/p2/app3",
|
||||
"p1/p2/p3/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "p1/app2", "path.basename": "app2", "path[0]": "p1", "path.basenameNormalized": "app2"},
|
||||
{"path": "p1/p2/app3", "path.basename": "app3", "path[0]": "p1", "path[1]": "p2", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths with Exclude",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*", Exclude: true}, {Path: "*"}, {Path: "*/*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"p1/app2",
|
||||
"p1/app3",
|
||||
"p2/app3",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Expecting same exclude behavior with different order",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}, {Path: "p1/*", Exclude: true}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"p1/app2",
|
||||
"p1/app3",
|
||||
"p2/app3",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles empty response from repo server",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles error from repo server",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{},
|
||||
repoError: fmt.Errorf("error"),
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("error"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], nil)
|
||||
|
||||
if testCaseCopy.expectedError != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromFiles(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
// files is the list of paths/globs to match
|
||||
files []argoprojiov1alpha1.GitFileGeneratorItem
|
||||
// repoFileContents maps repo path to the literal contents of that path
|
||||
repoFileContents map[string][]byte
|
||||
// if repoPathsError is non-nil, the mocked GetFiles(...) call will return this error value
|
||||
repoPathsError error
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow: create params from git files",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "production",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
},
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}`),
|
||||
"cluster-config/staging/config.json": []byte(`{
|
||||
"cluster": {
|
||||
"owner": "foo.bar@example.com",
|
||||
"name": "staging",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
}
|
||||
}`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "production",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "foo.bar@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/staging",
|
||||
"path.basename": "staging",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "staging",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles error during getting repo paths",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{},
|
||||
repoPathsError: fmt.Errorf("paths error"),
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("paths error"),
|
||||
},
|
||||
{
|
||||
name: "test invalid JSON file returns error",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`invalid json file`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`
|
||||
[
|
||||
{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "production",
|
||||
"address": "https://kubernetes.default.svc",
|
||||
"inner": {
|
||||
"one" : "two"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "staging",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
}
|
||||
}
|
||||
]`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"cluster.inner.one": "two",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "production",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "production",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Test YAML flow",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`
|
||||
cluster:
|
||||
owner: john.doe@example.com
|
||||
name: production
|
||||
address: https://kubernetes.default.svc
|
||||
key1: val1
|
||||
key2:
|
||||
key2_1: val2_1
|
||||
key2_2:
|
||||
key2_2_1: val2_2_1
|
||||
`),
|
||||
"cluster-config/staging/config.yaml": []byte(`
|
||||
cluster:
|
||||
owner: foo.bar@example.com
|
||||
name: staging
|
||||
address: https://kubernetes.default.svc
|
||||
`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "production",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "foo.bar@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/staging",
|
||||
"path.basename": "staging",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "staging",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "test YAML array",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`
|
||||
- cluster:
|
||||
owner: john.doe@example.com
|
||||
name: production
|
||||
address: https://kubernetes.default.svc
|
||||
inner:
|
||||
one: two
|
||||
- cluster:
|
||||
owner: john.doe@example.com
|
||||
name: staging
|
||||
address: https://kubernetes.default.svc`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"cluster.inner.one": "two",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "production",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path.basenameNormalized": "production",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Files: testCaseCopy.files,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], nil)
|
||||
fmt.Println(got, err)
|
||||
|
||||
if testCaseCopy.expectedError != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
32 applicationset/generators/interface.go Normal file
@@ -0,0 +1,32 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// Generator defines the interface implemented by all ApplicationSet generators.
|
||||
type Generator interface {
|
||||
// GenerateParams interprets the ApplicationSet and generates all relevant parameters for the application template.
|
||||
// The expected/desired list of parameters is returned; it will then be rendered and reconciled
|
||||
// against the current state of the Applications in the cluster.
|
||||
GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error)
|
||||
|
||||
// GetRequeueAfter lets the generator control when the next reconcile loop should run.
|
||||
// If there is more than one generator, the minimum of their times is used.
|
||||
// Generators returning NoRequeueAfter are ignored when computing that minimum.
|
||||
GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration
|
||||
|
||||
// GetTemplate returns the inline template from the spec if there is any, or an empty object otherwise
|
||||
GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
var EmptyAppSetGeneratorError = fmt.Errorf("ApplicationSet is empty")
|
||||
var NoRequeueAfter time.Duration
|
||||
|
||||
// DefaultRequeueAfterSeconds is the default time to wait before the next reconcile loop when GetRequeueAfter is not specified.
|
||||
const (
|
||||
DefaultRequeueAfterSeconds = 3 * time.Minute
|
||||
)
|
||||
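Every concrete generator in this package implements the three methods above. A skeletal, purely illustrative implementation (not part of the diff; the type name and paramSet contents are invented) that always emits a single fixed paramSet would look like:

	package generators

	import (
		"time"

		argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
	)

	// staticGenerator is a hypothetical generator that always produces one fixed paramSet.
	type staticGenerator struct{}

	var _ Generator = (*staticGenerator)(nil)

	func (s *staticGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
		if appSetGenerator == nil {
			return nil, EmptyAppSetGeneratorError
		}
		return []map[string]string{{"cluster": "in-cluster", "url": "https://kubernetes.default.svc"}}, nil
	}

	// A static source never needs to trigger another reconcile on its own.
	func (s *staticGenerator) GetRequeueAfter(_ *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
		return NoRequeueAfter
	}

	// No inline template, so return an empty object.
	func (s *staticGenerator) GetTemplate(_ *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
		return &argoprojiov1alpha1.ApplicationSetTemplate{}
	}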
74 applicationset/generators/list.go Normal file
@@ -0,0 +1,74 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*ListGenerator)(nil)
|
||||
|
||||
type ListGenerator struct {
|
||||
}
|
||||
|
||||
func NewListGenerator() Generator {
|
||||
g := &ListGenerator{}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.List.Template
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.List == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
res := make([]map[string]string, len(appSetGenerator.List.Elements))
|
||||
|
||||
for i, tmpItem := range appSetGenerator.List.Elements {
|
||||
params := map[string]string{}
|
||||
var element map[string]interface{}
|
||||
err := json.Unmarshal(tmpItem.Raw, &element)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshling list element %v", err)
|
||||
}
|
||||
|
||||
for key, value := range element {
|
||||
if key == "values" {
|
||||
values, ok := (value).(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing values map")
|
||||
}
|
||||
for k, v := range values {
|
||||
value, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing value as string %v", err)
|
||||
}
|
||||
params[fmt.Sprintf("values.%s", k)] = value
|
||||
}
|
||||
} else {
|
||||
v, ok := value.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing value as string %v", err)
|
||||
}
|
||||
params[key] = v
|
||||
}
|
||||
}
|
||||
|
||||
res[i] = params
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
39 applicationset/generators/list_test.go Normal file
@@ -0,0 +1,39 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestGenerateListParams(t *testing.T) {
|
||||
testCases := []struct {
|
||||
elements []apiextensionsv1.JSON
|
||||
expected []map[string]string
|
||||
}{
|
||||
{
|
||||
elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}},
|
||||
expected: []map[string]string{{"cluster": "cluster", "url": "url"}},
|
||||
}, {
|
||||
elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}},
|
||||
expected: []map[string]string{{"cluster": "cluster", "url": "url", "values.foo": "bar"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
var listGenerator = NewListGenerator()
|
||||
|
||||
got, err := listGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: testCase.elements,
|
||||
}}, nil)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
|
||||
}
|
||||
}
|
||||
157 applicationset/generators/matrix.go Normal file
@@ -0,0 +1,157 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*MatrixGenerator)(nil)
|
||||
|
||||
var (
|
||||
ErrMoreThanTwoGenerators      = fmt.Errorf("found more than two generators, Matrix supports only two")
|
||||
ErrLessThanTwoGenerators      = fmt.Errorf("found less than two generators, Matrix supports only two")
|
||||
ErrMoreThenOneInnerGenerators = fmt.Errorf("found more than one generator in matrix.Generators")
|
||||
)
|
||||
|
||||
type MatrixGenerator struct {
|
||||
// The inner generators supported by the matrix generator (cluster, git, list...)
|
||||
supportedGenerators map[string]Generator
|
||||
}
|
||||
|
||||
func NewMatrixGenerator(supportedGenerators map[string]Generator) Generator {
|
||||
m := &MatrixGenerator{
|
||||
supportedGenerators: supportedGenerators,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator.Matrix == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Matrix.Generators) < 2 {
|
||||
return nil, ErrLessThanTwoGenerators
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Matrix.Generators) > 2 {
|
||||
return nil, ErrMoreThanTwoGenerators
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
g0, err := m.getParams(appSetGenerator.Matrix.Generators[0], appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, a := range g0 {
|
||||
for _, b := range g1 {
|
||||
val, err := utils.CombineStringMaps(a, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, val)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
var matrix *argoprojiov1alpha1.MatrixGenerator
|
||||
if appSetBaseGenerator.Matrix != nil {
|
||||
// Since the nested matrix generator is represented as a JSON object in the CRD, we unmarshal it back to a Go struct here.
|
||||
nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(appSetBaseGenerator.Matrix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unmarshall nested matrix generator: %v", err)
|
||||
}
|
||||
if nestedMatrix != nil {
|
||||
matrix = nestedMatrix.ToMatrixGenerator()
|
||||
}
|
||||
}
|
||||
|
||||
var mergeGenerator *argoprojiov1alpha1.MergeGenerator
|
||||
if appSetBaseGenerator.Merge != nil {
|
||||
// Since the nested merge generator is represented as a JSON object in the CRD, we unmarshal it back to a Go struct here.
|
||||
nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(appSetBaseGenerator.Merge)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unmarshall nested merge generator: %v", err)
|
||||
}
|
||||
if nestedMerge != nil {
|
||||
mergeGenerator = nestedMerge.ToMergeGenerator()
|
||||
}
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: appSetBaseGenerator.List,
|
||||
Clusters: appSetBaseGenerator.Clusters,
|
||||
Git: appSetBaseGenerator.Git,
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrix,
|
||||
Merge: mergeGenerator,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
appSet)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("child generator returned an error on parameter generation: %v", err)
|
||||
}
|
||||
|
||||
if len(t) == 0 {
|
||||
return nil, fmt.Errorf("child generator generated no parameters")
|
||||
}
|
||||
|
||||
if len(t) > 1 {
|
||||
return nil, ErrMoreThenOneInnerGenerators
|
||||
}
|
||||
|
||||
return t[0].Params, nil
|
||||
}
|
||||
|
||||
const maxDuration time.Duration = 1<<63 - 1
|
||||
|
||||
func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
res := maxDuration
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Matrix.Generators {
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
for _, g := range generators {
|
||||
temp := g.GetRequeueAfter(base)
|
||||
if temp < res && temp != NoRequeueAfter {
|
||||
found = true
|
||||
res = temp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
return res
|
||||
} else {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Matrix.Template
|
||||
}
|
||||
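The nested loop in GenerateParams above produces the cartesian product of the two child paramSets, merging each pair with utils.CombineStringMaps. Stripped of error handling (and assuming CombineStringMaps is a union of keys that rejects conflicting values, which is not shown in this diff), the effect is roughly:

	package main

	import "fmt"

	func main() {
		// Two child paramSets, as in the "generate params from two lists" test case below.
		g0 := []map[string]string{{"a": "1"}, {"a": "2"}}
		g1 := []map[string]string{{"b": "1"}, {"b": "2"}}

		combined := []map[string]string{}
		for _, a := range g0 {
			for _, b := range g1 {
				merged := map[string]string{}
				for k, v := range a {
					merged[k] = v
				}
				for k, v := range b {
					merged[k] = v
				}
				combined = append(combined, merged)
			}
		}

		fmt.Println(combined) // [map[a:1 b:1] map[a:1 b:2] map[a:2 b:1] map[a:2 b:2]]
	}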
284 applicationset/generators/matrix_test.go Normal file
@@ -0,0 +1,284 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestMatrixGenerate(t *testing.T) {
|
||||
|
||||
gitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
}
|
||||
|
||||
listGenerator := &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
expectedErr error
|
||||
expected []map[string]string
|
||||
}{
|
||||
{
|
||||
name: "happy flow - generate params",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "cluster": "Cluster", "url": "Url"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "cluster": "Cluster", "url": "Url"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "happy flow - generate params from two lists",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"a": "1"}`)},
|
||||
{Raw: []byte(`{"a": "2"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"b": "1"}`)},
|
||||
{Raw: []byte(`{"b": "2"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"a": "1", "b": "1"},
|
||||
{"a": "1", "b": "2"},
|
||||
{"a": "2", "b": "1"},
|
||||
{"a": "2", "b": "2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns error if there is less than two base generators",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrLessThanTwoGenerators,
|
||||
},
|
||||
{
|
||||
name: "returns error if there is more than two base generators",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrMoreThanTwoGenerators,
|
||||
},
|
||||
{
|
||||
name: "returns error if there is more than one inner generator in the first base generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrMoreThenOneInnerGenerators,
|
||||
},
|
||||
{
|
||||
name: "returns error if there is more than one inner generator in the second base generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
Git: gitGenerator,
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrMoreThenOneInnerGenerators,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
mock := &generatorMock{}
|
||||
appSet := &argoprojiov1alpha1.ApplicationSet{}
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
}
|
||||
mock.On("GenerateParams", &gitGeneratorSpec, appSet).Return([]map[string]string{
|
||||
{
|
||||
"path": "app1",
|
||||
"path.basename": "app1",
|
||||
"path.basenameNormalized": "app1",
|
||||
},
|
||||
{
|
||||
"path": "app2",
|
||||
"path.basename": "app2",
|
||||
"path.basenameNormalized": "app2",
|
||||
},
|
||||
}, nil)
|
||||
|
||||
mock.On("GetTemplate", &gitGeneratorSpec).
|
||||
Return(&argoprojiov1alpha1.ApplicationSetTemplate{})
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}, appSet)
|
||||
|
||||
if testCaseCopy.expectedErr != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedErr.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
gitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
}
|
||||
|
||||
listGenerator := &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
gitGetRequeueAfter time.Duration
|
||||
expected time.Duration
|
||||
}{
|
||||
{
|
||||
name: "return NoRequeueAfter if all the inner baseGenerators returns it",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: NoRequeueAfter,
|
||||
expected: NoRequeueAfter,
|
||||
},
|
||||
{
|
||||
name: "returns the minimal time",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: time.Duration(1),
|
||||
expected: time.Duration(1),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
mock := &generatorMock{}
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
}
|
||||
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
got := matrixGenerator.GetRequeueAfter(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
})
|
||||
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
type generatorMock struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (g *generatorMock) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
args := g.Called(appSetGenerator)
|
||||
|
||||
return args.Get(0).(*argoprojiov1alpha1.ApplicationSetTemplate)
|
||||
}
|
||||
|
||||
func (g *generatorMock) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
args := g.Called(appSetGenerator, appSet)
|
||||
|
||||
return args.Get(0).([]map[string]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (g *generatorMock) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
args := g.Called(appSetGenerator)
|
||||
|
||||
return args.Get(0).(time.Duration)
|
||||
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.