Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 17:48:47 +01:00

Compare commits: requeue-bu...v2.3.5 (112 commits)
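For reference, a comparison like this can be reproduced against the mirror with plain git. This is a minimal sketch, assuming both refs exist locally under the names shown above (the branch name may be abbreviated by the compare view):

    git clone https://github.com/argoproj/argo-cd.git
    cd argo-cd
    # list the commits that differ between the two refs
    git log --oneline requeue-bu...v2.3.5
    # show the per-file changes summarized in the sections below
    git diff requeue-bu...v2.3.5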
Commits (SHA1), in listed order:

1287d24bfe 68e825cf9b 738384bdd0 7fb77d578d 336d61cac6 c0b62972ca 048c025cfe 7eb34755e5
2f4afe8c08 92f53aeb72 43f8027f6d 909f94a383 af5348c9b1 d4faef6324 af45491a7d b0e4473fcd
cf1f44e379 ac8b7df946 5d515b8423 69dcee049e d36d95dc9f df79d7db1d 7165431a84 13ec3f43d8
eea93c5103 2cc81959b4 07ac038a8f 3828da7f8c 3bd9121545 df6e7c169e 25b01666f0 9d6e6d84de
7f9ff6e8c3 ecc2af9dca c5b0279050 6df17e7c56 e55ecf9107 21f208f17e b65c1699fa 4c1428a6be
bca190bd0c 88ca5aabf2 8297f827a8 b85ef39e0d fe42780229 057d95374d 1546c1d314 31564c6067
6e54e59e82 196dab98dc 9d4ed2847e ffc6080060 ca2e3041f1 31676e2aea 81f9bc20ec 0b868bd221
37c1585c2f e095ef6e4c 8396e06121 d4a6498d80 4fc814bc16 bf67d55f3f 87402fdc4c 10b4c47aad
e6a9400c38 ebb21736df ea7ad844cb 76c0b32887 827096c195 bca450f7aa 3c7d99f82c c5c25cd75b
f54d0a037f 6da138956a b3a181ea88 2f5da24654 cc572a5eb2 ae03c3b872 79ac472382 061ba811f1
b5909d59ec 68f17dbe8e bc6b4950ba c2cf633bbc bdd05d5c7a 4b04a39180 aa54aa7eda 4bbd0e57dd
2d6b619a86 39487ef7c7 fcc7c0b080 ed734fedbb 7414f2d42c 8139df8983 f364330de2 4ec67d8b08
3d3f81df4a 37f01f6f32 36eab6b82c 793acc147f b50609c0e6 5f48ce96c6 c32460a2bc 7eb1aba99b
1a8139f4d6 02c03c3b26 cdb20d5060 7d7eed4932 af8c5eb07a 1a476f7564 7f15389c72 1a3556e1cc
@@ -7,7 +7,6 @@ ignore:
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
- "test/.*"
coverage:
status:
# we've found this not to be useful
113 .github/workflows/ci-build.yaml vendored

@@ -12,14 +12,7 @@ on:
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
GOLANG_VERSION: '1.17'
jobs:
check-go:

@@ -27,9 +20,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules

@@ -45,13 +38,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -62,22 +55,15 @@ jobs:
run: make build-local
lint-go:
permissions:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
uses: actions/checkout@v2
- name: Run golangci-lint
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
uses: golangci/golangci-lint-action@v3
with:
version: v1.45.2
version: v1.46.2
args: --timeout 10m --exclude SA5011 --verbose
test-go:

@@ -85,18 +71,15 @@ jobs:
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages

@@ -116,17 +99,13 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"

@@ -137,12 +116,12 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: test-results
path: test-results/

@@ -152,18 +131,15 @@ jobs:
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages

@@ -183,17 +159,13 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"

@@ -204,7 +176,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: race-results
path: test-results/

@@ -214,9 +186,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH

@@ -240,10 +212,9 @@ jobs:
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
- name: Initialize local Helm
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
helm2 init --client-only
- name: Run codegen
run: |
set -x

@@ -262,14 +233,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup NodeJS
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
uses: actions/setup-node@v1
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}

@@ -299,12 +270,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}

@@ -315,16 +286,16 @@ jobs:
run: |
mkdir -p test-results
- name: Get code coverage artifiact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@v2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@v2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
uses: codecov/codecov-action@v1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud

@@ -373,26 +344,16 @@ jobs:
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
ARGOCD_SERVER: "127.0.0.1:8088"
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
# ubuntu-22.04 comes with kubectl, but the version is not pinned. The version as of 2022-12-05 is 1.26.0 which
# breaks the `TestNamespacedResourceDiffing` e2e test. So we'll pin to 1.25 and then fix the underlying issue.
- name: Install kubectl
run: |
rm /usr/local/bin/kubectl
curl -LO https://dl.k8s.io/release/v1.25.4/bin/linux/amd64/kubectl
mv kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
- name: Install K3S
env:
INSTALL_K3S_VERSION: ${{ matrix.k3s-version }}+k3s1

@@ -405,7 +366,7 @@ jobs:
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -431,9 +392,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull quay.io/dexidp/dex:v2.25.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.7-alpine
docker pull redis:6.2.6-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist

@@ -450,7 +411,7 @@ jobs:
count=1
until curl -f http://127.0.0.1:8088/healthz; do
sleep 10;
if test $count -ge 180; then
if test $count -ge 60; then
echo "Timeout"
exit 1
fi

@@ -461,7 +422,7 @@ jobs:
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
31 .github/workflows/codeql.yml vendored

@@ -2,26 +2,12 @@ name: "Code scanning - action"
on:
push:
# Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot
branches-ignore:
- 'dependabot/**'
pull_request:
schedule:
- cron: '0 19 * * 0'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
CodeQL-Build:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd'
# CodeQL runs on ubuntu-latest and windows-latest

@@ -29,11 +15,20 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java

@@ -41,7 +36,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/autobuild@v1
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl

@@ -55,4 +50,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/analyze@v1
100 .github/workflows/image.yaml vendored

@@ -10,40 +10,31 @@ on:
types: [ labeled, unlabeled, opened, synchronize, reopened ]
env:
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
GOLANG_VERSION: '1.17'
jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
- uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- uses: actions/checkout@master
with:
path: src/github.com/argoproj/argo-cd
# get image tag
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
working-directory: ./src/github.com/argoproj/argo-cd
id: image
# login
- run: |
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
docker login ghcr.io --username $USERNAME --password $PASSWORD
docker login quay.io --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
if: github.event_name == 'push'
env:
USERNAME: ${{ secrets.USERNAME }}

@@ -52,91 +43,20 @@ jobs:
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
# build
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Setup cache for argocd-ui docker layer
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-single-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-single-buildx
- name: Build cache for argocd-ui stage
uses: docker/build-push-action@v2
with:
context: ./src/github.com/argoproj/argo-cd
target: argocd-ui
push: false
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
- name: Run non-container Snyk scans
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
npm install -g snyk
# Run with high threshold to fail build.
snyk test --org=argoproj --all-projects --exclude=docs,site --severity-threshold=high --policy-path=.snyk
snyk iac test manifests/install.yaml --org=argoproj --severity-threshold=high --policy-path=.snyk
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
then
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
IMAGE_PLATFORMS=linux/amd64,linux/arm64
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
--cache-from "type=local,src=/tmp/.buildx-cache" \
docker buildx build --platform $IMAGE_PLATFORMS --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd
- name: Run container Snyk scan
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
snyk container test quay.io/argoproj/argocd:latest --org=argoproj --file=Dockerfile --severity-threshold=high
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Clean up build cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ github.event_name == 'push' }}
# deploy
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
161 .github/workflows/release.yaml vendored

@@ -2,25 +2,20 @@ name: Create ArgoCD release
on:
push:
tags:
- "release-v*"
- "!release-v1.5*"
- "!release-v1.4*"
- "!release-v1.3*"
- "!release-v1.2*"
- "!release-v1.1*"
- "!release-v1.0*"
- "!release-v0*"
- 'release-v*'
- '!release-v1.5*'
- '!release-v1.4*'
- '!release-v1.3*'
- '!release-v1.2*'
- '!release-v1.1*'
- '!release-v1.0*'
- '!release-v0*'
env:
GOLANG_VERSION: '1.18'
permissions:
contents: read
GOLANG_VERSION: '1.17'
jobs:
prepare-release:
permissions:
contents: write # To push changes to release branch
name: Perform automatic release on trigger ${{ github.ref }}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest

@@ -43,7 +38,7 @@ jobs:
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}

@@ -147,7 +142,7 @@ jobs:
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}

@@ -177,10 +172,7 @@ jobs:
run: |
set -ue
make install-codegen-tools-local
# We install kustomize in the dist directory
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
helm2 init --client-only
make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"

@@ -199,52 +191,29 @@ jobs:
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}"
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
- name: Build and push Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
docker buildx build --platform linux/amd64,linux/arm64 --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
uses: juliangruber/read-file-action@v1
with:
path: ${{ env.RELEASE_NOTES }}
- name: Push changes to release branch

@@ -253,8 +222,8 @@ jobs:
git push origin ${TARGET_BRANCH}
git push origin ${RELEASE_TAG}
- name: Dry run GitHub release
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
- name: Create GitHub release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release

@@ -264,7 +233,61 @@ jobs:
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }}
if: ${{ env.DRY_RUN == 'true' }}
- name: Upload argocd-linux-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-linux-amd64
asset_name: argocd-linux-amd64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-linux-arm64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-linux-arm64
asset_name: argocd-linux-arm64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-darwin-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-darwin-amd64
asset_name: argocd-darwin-amd64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-darwin-arm64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-darwin-arm64
asset_name: argocd-darwin-arm64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-windows-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-windows-amd64.exe
asset_name: argocd-windows-amd64.exe
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Generate SBOM (spdx)
id: spdx-builder

@@ -275,7 +298,7 @@ jobs:
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |

@@ -297,34 +320,21 @@ jobs:
cd /tmp && tar -zcf sbom.tar.gz *.spdx
if: ${{ env.DRY_RUN != 'true' }}
- name: Sign sbom
run: |
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Create GitHub release
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
- name: Upload SBOM to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: ${{ env.RELEASE_TAG }}
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
files: |
dist/argocd-*
/tmp/sbom.tar.gz
/tmp/sbom.tar.gz.sig
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: /tmp/sbom.tar.gz
asset_name: sbom.tar.gz
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
uses: dawidd6/action-homebrew-bump-formula@v3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd

@@ -335,3 +345,4 @@ jobs:
set -ue
git push --delete origin ${SOURCE_TAG}
if: ${{ always() }}
3 .gitignore vendored

@@ -16,10 +16,9 @@ test-results
.scratch
node_modules/
.kube/
./test/cmp/*.sock
# ignore built binaries
cmd/argocd/argocd
cmd/argocd-application-controller/argocd-application-controller
cmd/argocd-repo-server/argocd-repo-server
cmd/argocd-server/argocd-server
cmd/argocd-server/argocd-server
22 .snyk

@@ -1,22 +0,0 @@
# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
version: v1.22.1
# ignores vulnerabilities until expiry date; change duration by modifying expiry date
ignore:
SNYK-JS-ANSIREGEX-1583908:
- '*':
reason: >-
Code is only run client-side in the swagger-ui endpoint. No risk of
server-side DoS.
SNYK-CC-K8S-44:
- 'manifests/core-install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
- 'manifests/install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
SNYK-JS-MOMENT-2440688:
- '*':
reason: >-
Code is only run client-side. No risk of directory traversal.
patch: {}
59 CHANGELOG.md

@@ -1,63 +1,6 @@
# Changelog

## v2.4.0 (Unreleased)

### Web Terminal In Argo CD UI

This feature enables engineers to start a shell in a running application container without leaving the web interface. Find the required Kubernetes Pod on the Application Details page, click on it, and select the Terminal tab. The shell starts automatically, lets you execute the required commands, and helps to troubleshoot the application state.

### Access Control For Pod Logs & Web Terminal

Argo CD is used to manage the critical infrastructure of multiple organizations, which makes security the top priority of the project. We've listened to your feedback and introduced additional access control settings that govern access to Kubernetes Pod logs and the new Web Terminal feature.

#### Pod Logs UI

Since 2.4.9, the LOGS tab in the pod view is visible in the UI only for users with an explicit allow get logs policy.

#### Known pod logs UI issue prior to 2.4.9

When a user without an explicit allow get logs policy opens the "LOGS" tab in the pod view, a red "unable to load data: Internal error" message appears at the bottom of the screen and "Failed to load data, please try again" is displayed.
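A minimal sketch of what such a policy could look like in the policy.csv of the argocd-rbac-cm ConfigMap; the role and group names here are illustrative, not taken from the changelog:

    p, role:log-viewer, logs, get, */*, allow
    p, role:log-viewer, exec, create, */*, allow
    g, my-org:devops, role:log-viewer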
### OpenTelemetry Tracing Integration

The new feature emits richer telemetry data that can make it easier to identify performance bottlenecks. It is available for the argocd-server and argocd-repo-server components and can be enabled using the --otlp-address flag.
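A minimal sketch of enabling this, assuming an OpenTelemetry collector is reachable at the illustrative address otel-collector:4317; only the --otlp-address flag itself comes from the changelog text above:

    argocd-server --otlp-address=otel-collector:4317
    argocd-repo-server --otlp-address=otel-collector:4317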
### Power PC and IBM Z Support

The list of supported architectures has been expanded and now includes IBM Z (s390x) and PowerPC (ppc64le). Starting with the v2.4 release, the official quay.io repository will provide images for the amd64, arm64, ppc64le, and s390x architectures.

### Other Notable Changes

Overall, the v2.4 release includes more than 300 commits from nearly 90 contributors. Here is a short sample of the contributions:

* Enforce the deployment to remote clusters only
* Native support of GCP authentication for GKE
* Secured Redis connection
* ApplicationSet Gitea support

## v2.3.3 (2022-03-29)

- fix: prevent excessive repo-server disk usage for large repos (#8845) (#8897)
- fix: Set QPS and burst rate for resource ops client (#8915)

## v2.3.2 (2022-03-22)

- fix: application resource APIs must enforce project restrictions

## v2.3.1 (2022-03-10)

- fix: Retry checkbox unchecked unexpectedly; Sync up with YAML (#8682) (#8720)
- chore: Bump stable version of application set addon (#8744)
- fix: correct jsonnet paths resolution (#8721)
- fix(ui): Applications page incorrectly resets to tiles view. Fixes #8702 (#8718)

## v2.3.0 (2022-03-05)
## v2.3.0 (Unreleased)

### Argo CD ApplicationSet and Notifications are now part of Argo CD
82 Dockerfile

@@ -1,17 +1,16 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
ARG BASE_IMAGE=docker.io/library/ubuntu:21.10
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.18 AS builder
FROM docker.io/library/golang:1.17 as builder
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN apt-get update && apt-get install --no-install-recommends -y \
RUN apt-get update && apt-get install -y \
openssh-server \
nginx \
unzip \
fcgiwrap \
git \
git-lfs \

@@ -25,16 +24,20 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
WORKDIR /tmp
COPY hack/install.sh hack/tool-versions.sh ./
COPY hack/installers installers
ADD hack/install.sh .
ADD hack/installers installers
ADD hack/tool-versions.sh .
RUN ./install.sh helm-linux && \
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
RUN ./install.sh ksonnet-linux
RUN ./install.sh helm2-linux
RUN ./install.sh helm-linux
RUN ./install.sh kustomize-linux
RUN ./install.sh awscli-linux
####################################################################################################
# Argo CD Base - used as the base for both the release and dev argocd images
####################################################################################################
FROM $BASE_IMAGE AS argocd-base
FROM $BASE_IMAGE as argocd-base
USER root

@@ -47,31 +50,34 @@ RUN groupadd -g 999 argocd && \
chmod g=u /home/argocd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y \
git git-lfs tini gpg tzdata && \
apt-get install -y git git-lfs tini gpg tzdata && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh
COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY --from=builder /usr/local/aws-cli/v2/current/dist /usr/local/aws-cli/v2/current/dist
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
# keep uid_entrypoint.sh for backward compatibility
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
RUN ln -s /usr/local/aws-cli/v2/current/dist/aws /usr/local/bin/aws
# support for mounting configuration from a configmap
WORKDIR /app/config/ssh
RUN touch ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
RUN mkdir -p /app/config/ssh && \
touch /app/config/ssh/ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
WORKDIR /app/config
RUN mkdir -p tls && \
mkdir -p gpg/source && \
mkdir -p gpg/keys && \
chown argocd gpg/keys && \
chmod 0700 gpg/keys
RUN mkdir -p /app/config/tls
RUN mkdir -p /app/config/gpg/source && \
mkdir -p /app/config/gpg/keys && \
chown argocd /app/config/gpg/keys && \
chmod 0700 /app/config/gpg/keys
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
USER 999

@@ -80,37 +86,35 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
FROM docker.io/library/node:12.18.4 as argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
ADD ["ui/package.json", "ui/yarn.lock", "./"]
RUN yarn install --network-timeout 200000 && \
yarn cache clean
RUN yarn install --network-timeout 200000
COPY ["ui/", "."]
ADD ["ui/", "."]
ARG ARGO_VERSION=latest
ENV ARGO_VERSION=$ARGO_VERSION
ARG TARGETARCH
RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTIONS=--max_old_space_size=8192 yarn build
RUN HOST_ARCH='amd64' NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTIONS=--max_old_space_size=8192 yarn build
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.18 AS argocd-build
FROM docker.io/library/golang:1.17 as argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY go.* ./
COPY go.mod go.mod
COPY go.sum go.sum
RUN go mod download
# Perform the build
COPY . .
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
ARG TARGETOS
ARG TARGETARCH
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make argocd-all
RUN make argocd-all
####################################################################################################
# Final image

@@ -119,13 +123,11 @@ FROM argocd-base
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications
USER 999

@@ -5,11 +5,9 @@ FROM argocd-base
COPY argocd /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications
USER 999
33 Makefile

@@ -25,7 +25,7 @@ DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
ARGOCD_PROCFILE?=Procfile
# Strict mode has been disabled in latest versions of mkdocs-material.
# Strict mode has been disabled in latest versions of mkdocs-material.
# Thus pointing to the older image of mkdocs-material matching the version used by argo-cd.
MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1
MKDOCS_RUN_ARGS?=

@@ -51,7 +51,6 @@ ARGOCD_E2E_TEST_TIMEOUT?=30m
ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
ARGOCD_BIN_MODE?=true
ARGOCD_LINT_GOGC?=20

@@ -114,7 +113,7 @@ define run-in-test-client
bash -c "$(1)"
endef
#
#
define exec-in-test-server
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
endef

@@ -136,6 +135,7 @@ override LDFLAGS += \
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
ifeq (${STATIC_BUILD}, true)

@@ -205,7 +205,7 @@ clientgen: ensure-gopath
.PHONY: clidocsgen
clidocsgen: ensure-gopath
go run tools/cmd-docs/main.go
go run tools/cmd-docs/main.go
.PHONY: codegen-local

@@ -234,8 +234,6 @@ release-cli: clean-debug build-ui
make BIN_NAME=argocd-darwin-arm64 GOOS=darwin GOARCH=arm64 argocd-all
make BIN_NAME=argocd-linux-amd64 GOOS=linux argocd-all
make BIN_NAME=argocd-linux-arm64 GOOS=linux GOARCH=arm64 argocd-all
make BIN_NAME=argocd-linux-ppc64le GOOS=linux GOARCH=ppc64le argocd-all
make BIN_NAME=argocd-linux-s390x GOOS=linux GOARCH=s390x argocd-all
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all
.PHONY: test-tools-image

@@ -270,7 +268,7 @@ controller:
.PHONY: build-ui
build-ui:
DOCKER_BUILDKIT=1 docker build -t argocd-ui --target argocd-ui .
docker build -t argocd-ui --target argocd-ui .
find ./ui/dist -type f -not -name gitkeep -delete
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'

@@ -281,7 +279,7 @@ ifeq ($(DEV_IMAGE), true)
# the dist directory is under .dockerignore.
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
image: build-ui
DOCKER_BUILDKIT=1 docker build -t argocd-base --target argocd-base .
docker build -t argocd-base --target argocd-base .
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller

@@ -292,7 +290,7 @@ image: build-ui
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
else
image:
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
endif
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi

@@ -420,11 +418,10 @@ start-e2e: test-tools-image
# Starts e2e server locally (or within a container)
.PHONY: start-e2e-local
start-e2e-local: mod-vendor-local dep-ui-local cli-local
start-e2e-local:
kubectl create ns argocd-e2e || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
# Create GPG keys and source directories
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys

@@ -437,11 +434,9 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_E2E_DISABLE_AUTH=false \
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}

@@ -535,7 +530,9 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
# Installs all tools required for running unit & end-to-end tests (Linux packages)
.PHONY: install-test-tools-local
install-test-tools-local:
./hack/install.sh kustomize
./hack/install.sh kustomize-linux
./hack/install.sh ksonnet-linux
./hack/install.sh helm2-linux
./hack/install.sh helm-linux
# Installs all tools required for running codegen (Linux packages)

@@ -561,11 +558,3 @@ start-test-k8s:
.PHONY: list
list:
@LC_ALL=C $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$'
.PHONY: applicationset-controller
applicationset-controller:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
.PHONY: checksums
checksums:
sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt
8 OWNERS

@@ -8,22 +8,20 @@ approvers:
- jannfis
- jessesuen
- jgwest
- keithchong
- mayzhang2000
- rbreeze
- leoluz
- crenshaw-dev
- pasha-codefresh
reviewers:
- dthomson25
- tetchel
- terrytangyuan
- wtam2018
- ishitasequeira
- reginapizza
- hblixt
- chetan-rns
- wanghong230
- pasha-codefresh
- ciiay
- leoluz
- crenshaw-dev
- saumeya
11
Procfile
11
Procfile
@@ -1,12 +1,9 @@
|
||||
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:7.0.0-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.6-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
README.md
@@ -1,4 +1,8 @@
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [](https://argoproj.github.io/community/join-slack) [](https://codecov.io/gh/argoproj/argo-cd) [](https://github.com/argoproj/argo-cd/releases/latest) [](https://bestpractices.coreinfrastructure.org/projects/4486) [](https://twitter.com/argoproj)
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
[](https://argoproj.github.io/community/join-slack)
[](https://codecov.io/gh/argoproj/argo-cd)
[](https://github.com/argoproj/argo-cd/releases/latest)
[](https://bestpractices.coreinfrastructure.org/projects/4486)
# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -32,8 +36,8 @@ Check live demo at https://cd.apps.argoproj.io/.
* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)
* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
* User Community meeting: [Every other Wednesday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
@@ -69,6 +73,3 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
SECURITY.md
@@ -6,7 +6,7 @@ Version: **v1.4 (2022-01-23)**
As a deployment tool, Argo CD needs to have production access which makes
security a very important topic. The Argoproj team takes security very
seriously and is continuously working on improving it.
seriously and is continuously working on improving it.
## A word about security scanners
@@ -60,17 +60,17 @@ We will do our best to react quickly on your inquiry, and to coordinate a fix
and disclosure with you. Sometimes, it might take a little longer for us to
react (e.g. out of office conditions), so please bear with us in these cases.
We will publish security advisories using the
We will publish security advisiories using the
[Git Hub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well informed, and will credit you for your
findings (unless you prefer to stay anonymous, of course).
Please report vulnerabilities by e-mail to the following address:
Please report vulnerabilities by e-mail to the following address:
* cncf-argo-security@lists.cncf.io
## Securing your Argo CD Instance
See the [operator manual security page](docs/operator-manual/security.md) for
additional information about Argo CD's security features and how to make your
See the [operator manual security page](docs/operator-manual/security.md) for
additional information about Argo CD's security features and how to make your
Argo CD production ready.
USERS.md
@@ -13,8 +13,8 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Alibaba Group](https://www.alibabagroup.com/)
|
||||
1. [Allianz Direct](https://www.allianzdirect.de/)
|
||||
1. [Ambassador Labs](https://www.getambassador.io/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [Ant Group](https://www.antgroup.com/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [AppDirect](https://www.appdirect.com)
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
|
||||
@@ -23,32 +23,25 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
|
||||
1. [Beat](https://thebeat.co/en/)
|
||||
1. [Beez Innovation Labs](https://www.beezlabs.com/)
|
||||
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BMW Group](https://www.bmwgroup.com/)
|
||||
1. [Boozt](https://www.booztgroup.com/)
|
||||
1. [Boticario](https://www.boticario.com.br/)
|
||||
1. [Camptocamp](https://camptocamp.com)
|
||||
1. [Capital One](https://www.capitalone.com)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [Casavo](https://casavo.com)
|
||||
1. [Celonis](https://www.celonis.com/)
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Cobalt](https://www.cobalt.io/)
|
||||
1. [Codefresh](https://www.codefresh.io/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [CROZ d.o.o.](https://croz.net/)
|
||||
1. [Crédit Agricole CIB](https://www.ca-cib.com)
|
||||
1. [CROZ d.o.o.](https://croz.net/)
|
||||
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
|
||||
1. [Cybozu](https://cybozu-global.com)
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [D2iQ](https://www.d2iq.com)
|
||||
1. [Datarisk](https://www.datarisk.io/)
|
||||
1. [Deloitte](https://www.deloitte.com/)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
|
||||
1. [EDF Renewables](https://www.edf-re.com/)
|
||||
1. [edX](https://edx.org)
|
||||
@@ -56,18 +49,16 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Elium](https://www.elium.com)
|
||||
1. [END.](https://www.endclothing.com/)
|
||||
1. [Energisme](https://energisme.com/)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Flip](https://flip.id)
|
||||
1. [Fonoa](https://www.fonoa.com/)
|
||||
1. [freee](https://corp.freee.co.jp/en/company/)
|
||||
1. [Future PLC](https://www.futureplc.com/)
|
||||
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
|
||||
1. [Garner](https://www.garnercorp.com)
|
||||
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
|
||||
1. [Generali Deutschland AG](https://www.generali.de/)
|
||||
1. [Gitpod](https://www.gitpod.io)
|
||||
1. [Gllue](https://gllue.com)
|
||||
1. [Glovo](https://www.glovoapp.com)
|
||||
1. [Gllue](https://gllue.com)
|
||||
1. [GMETRI](https://gmetri.com/)
|
||||
1. [Gojek](https://www.gojek.io/)
|
||||
1. [Greenpass](https://www.greenpass.com.br/)
|
||||
@@ -80,34 +71,25 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [IBM](https://www.ibm.com/)
|
||||
1. [Ibotta](https://home.ibotta.com)
|
||||
1. [IITS-Consulting](https://iits-consulting.de)
|
||||
1. [imaware](https://imaware.health)
|
||||
1. [Index Exchange](https://www.indexexchange.com/)
|
||||
1. [InsideBoard](https://www.insideboard.com)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [Joblift](https://joblift.com/)
|
||||
1. [JovianX](https://www.jovianx.com/)
|
||||
1. [Kaltura](https://corp.kaltura.com/)
|
||||
1. [KarrotPay](https://www.daangnpay.com/)
|
||||
1. [Karrot](https://www.daangn.com/)
|
||||
1. [KarrotPay](https://www.daangnpay.com/)
|
||||
1. [Kasa](https://kasa.co.kr/)
|
||||
1. [Keeeb](https://www.keeeb.com/)
|
||||
1. [Keptn](https://keptn.sh)
|
||||
1. [Kinguin](https://www.kinguin.net/)
|
||||
1. [KintoHub](https://www.kintohub.com/)
|
||||
1. [KompiTech GmbH](https://www.kompitech.com/)
|
||||
1. [KubeSphere](https://github.com/kubesphere)
|
||||
1. [LexisNexis](https://www.lexisnexis.com/)
|
||||
1. [Lightricks](https://www.lightricks.com/)
|
||||
1. [LINE](https://linecorp.com/en/)
|
||||
1. [Lytt](https://www.lytt.co/)
|
||||
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
|
||||
1. [Major League Baseball](https://mlb.com)
|
||||
1. [Mambu](https://www.mambu.com/)
|
||||
1. [MariaDB](https://mariadb.com)
|
||||
1. [Mattermost](https://www.mattermost.com)
|
||||
1. [Max Kelsen](https://www.maxkelsen.com/)
|
||||
1. [MeDirect](https://medirect.com.mt/)
|
||||
1. [Metanet](http://www.metanet.co.kr/en/)
|
||||
1. [MindSpore](https://mindspore.cn)
|
||||
1. [Mirantis](https://mirantis.com/)
|
||||
1. [mixi Group](https://mixi.co.jp/)
|
||||
@@ -129,7 +111,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Opensurvey](https://www.opensurvey.co.kr/)
|
||||
1. [Optoro](https://www.optoro.com/)
|
||||
1. [Orbital Insight](https://orbitalinsight.com/)
|
||||
1. [p3r](https://www.p3r.one/)
|
||||
1. [Packlink](https://www.packlink.com/)
|
||||
1. [PayPay](https://paypay.ne.jp/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
@@ -143,24 +124,15 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Quipper](https://www.quipper.com/)
|
||||
1. [Recreation.gov](https://www.recreation.gov/)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
1. [Riskified](https://www.riskified.com/)
|
||||
1. [Robotinfra](https://www.robotinfra.com)
|
||||
1. [Rubin Observatory](https://www.lsst.org)
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [Schwarz IT](https://jobs.schwarz/it-mission)
|
||||
1. [Skit](https://skit.ai/)
|
||||
1. [Skyscanner](https://www.skyscanner.net/)
|
||||
1. [Smilee.io](https://smilee.io)
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Snyk](https://snyk.io/)
|
||||
1. [Speee](https://speee.jp/)
|
||||
1. [Spendesk](https://spendesk.com/)
|
||||
1. [Spores Labs](https://spores.app)
|
||||
1. [Stuart](https://stuart.com/)
|
||||
1. [Sumo Logic](https://sumologic.com/)
|
||||
1. [Sutpc](http://www.sutpc.com/)
|
||||
1. [Swiss Post](https://github.com/swisspost)
|
||||
@@ -169,8 +141,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Syncier](https://syncier.com/)
|
||||
1. [TableCheck](https://tablecheck.com/)
|
||||
1. [Tailor Brands](https://www.tailorbrands.com)
|
||||
1. [Tamkeen Technologies](https://tamkeentech.sa/)
|
||||
1. [Technacy](https://www.technacy.it/)
|
||||
1. [Tesla](https://tesla.com/)
|
||||
1. [ThousandEyes](https://www.thousandeyes.com/)
|
||||
1. [Ticketmaster](https://ticketmaster.com)
|
||||
@@ -180,10 +150,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [tru.ID](https://tru.id)
|
||||
1. [Twilio SendGrid](https://sendgrid.com)
|
||||
1. [tZERO](https://www.tzero.com/)
|
||||
1. [ungleich.ch](https://ungleich.ch/)
|
||||
1. [UBIO](https://ub.io/)
|
||||
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
|
||||
1. [ungleich.ch](https://ungleich.ch/)
|
||||
1. [Unifonic Inc](https://www.unifonic.com/)
|
||||
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
|
||||
1. [Viaduct](https://www.viaduct.ai/)
|
||||
1. [Virtuo](https://www.govirtuo.com/)
|
||||
@@ -191,18 +160,38 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Volvo Cars](https://www.volvocars.com/)
|
||||
1. [VSHN - The DevOps Company](https://vshn.ch/)
|
||||
1. [Walkbase](https://www.walkbase.com/)
|
||||
1. [Webstores](https://www.webstores.nl)
|
||||
1. [Wehkamp](https://www.wehkamp.nl/)
|
||||
1. [WeMo Scooter](https://www.wemoscooter.com/)
|
||||
1. [Webstores](https://www.webstores.nl)
|
||||
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
|
||||
1. [Witick](https://witick.io/)
|
||||
1. [WooliesX](https://wooliesx.com.au/)
|
||||
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
|
||||
1. [WSpot](https://www.wspot.com.br/)
|
||||
1. [Yieldlab](https://www.yieldlab.de/)
|
||||
1. [Youverify](https://youverify.co/)
|
||||
1. [Yubo](https://www.yubo.live/)
|
||||
1. [Zimpler](https://www.zimpler.com/)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [Smilee.io](https://smilee.io)
|
||||
1. [Metanet](http://www.metanet.co.kr/en/)
|
||||
1. [Unifonic Inc](https://www.unifonic.com/)
|
||||
1. [Tamkeen Technologies](https://tamkeentech.sa/)
|
||||
1. [Kaltura](https://corp.kaltura.com/)
|
||||
1. [Boticario](https://www.boticario.com.br/)
|
||||
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
|
||||
1. [MariaDB](https://mariadb.com)
|
||||
1. [Lightricks](https://www.lightricks.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [MeDirect](https://medirect.com.mt/)
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Technacy](https://www.technacy.it/)
|
||||
1. [freee](https://corp.freee.co.jp/en/company/)
|
||||
1. [Youverify](https://youverify.co/)
|
||||
1. [Keeeb](https://www.keeeb.com/)
|
||||
1. [p3r](https://www.p3r.one/)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
1. [Skyscanner](https://www.skyscanner.net/)
|
||||
1. [Casavo](https://casavo.com)
|
||||
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
|
||||
1. [ZOZO](https://corp.zozo.com/)
|
||||
1. [Trendyol](https://www.trendyol.com/)
|
||||
1. [RapidAPI](https://www.rapidapi.com/)
|
||||
|
||||
@@ -1,732 +0,0 @@
|
||||
/*
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
log "github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
)
|
||||
|
||||
const (
|
||||
// Rather than importing the whole argocd-notifications controller, just copying the const here
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/subscriptions.go#L12
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
|
||||
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
|
||||
ReconcileRequeueOnValidationError = time.Minute * 3
|
||||
)
|
||||
|
||||
var (
|
||||
preservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
}
|
||||
)
|
||||
|
||||
// ApplicationSetReconciler reconciles a ApplicationSet object
|
||||
type ApplicationSetReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
Generators map[string]generators.Generator
|
||||
ArgoDB db.ArgoDB
|
||||
ArgoAppClientset appclientset.Interface
|
||||
KubeClientset kubernetes.Interface
|
||||
utils.Policy
|
||||
utils.Renderer
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch
|
||||
|
||||
func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = r.Log.WithValues("applicationset", req.NamespacedName)
|
||||
_ = log.WithField("applicationset", req.NamespacedName)
|
||||
|
||||
var applicationSetInfo argoprojiov1alpha1.ApplicationSet
|
||||
parametersGenerated := false
|
||||
|
||||
if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
log.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err)
|
||||
}
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
|
||||
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Log a warning if there are unrecognized generators
|
||||
utils.CheckInvalidGenerators(&applicationSetInfo)
|
||||
// desiredApplications is the main list of all expected Applications from all generators in this appset.
|
||||
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: string(applicationSetReason),
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
parametersGenerated = true
|
||||
|
||||
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo, req.Namespace)
|
||||
if err != nil {
|
||||
// While some generators may return an error that requires user intervention,
|
||||
// other generators reference external resources that may change to cause
|
||||
// the error to no longer occur. We thus log the error and requeue
|
||||
// with a timeout to give this another shot at a later time.
|
||||
//
|
||||
// Changes to watched resources will cause this to be reconciled sooner than
|
||||
// the RequeueAfter time.
|
||||
log.Errorf("error occurred during application validation: %s", err.Error())
|
||||
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range desiredApplications {
|
||||
if validateErrors[i] == nil {
|
||||
validApps = append(validApps, desiredApplications[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(validateErrors) > 0 {
|
||||
var message string
|
||||
for _, v := range validateErrors {
|
||||
message = v.Error()
|
||||
log.Errorf("validation error found during application validation: %s", message)
|
||||
}
|
||||
if len(validateErrors) > 1 {
|
||||
// Only the last message gets added to the appset status, to keep the size reasonable.
|
||||
message = fmt.Sprintf("%s (and %d more)", message, len(validateErrors)-1)
|
||||
}
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: message,
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
}
|
||||
|
||||
if r.Policy.Update() {
|
||||
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonUpdateApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else {
|
||||
err = r.createInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonCreateApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if r.Policy.Delete() {
|
||||
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonDeleteApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if applicationSetInfo.RefreshRequired() {
|
||||
delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
|
||||
err := r.Client.Update(ctx, &applicationSetInfo)
|
||||
if err != nil {
|
||||
log.Warnf("error occurred while updating ApplicationSet: %v", err)
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonRefreshApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
|
||||
log.WithField("requeueAfter", requeueAfter).Info("end reconcile")
|
||||
|
||||
if len(validateErrors) == 0 {
|
||||
if err := r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{
|
||||
RequeueAfter: requeueAfter,
|
||||
}, nil
|
||||
}
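// Illustrative sketch, not part of the original file: Reconcile above relies on
// two different retry styles. Returning a non-nil error makes controller-runtime
// requeue the request with exponential backoff, while returning a Result with
// RequeueAfter and a nil error schedules a single fixed-delay retry
// (ReconcileRequeueOnValidationError, i.e. three minutes, for validation errors).
func sketchRequeueContract(validationFailed bool, err error) (ctrl.Result, error) {
	if err != nil {
		return ctrl.Result{}, err // retried with exponential backoff
	}
	if validationFailed {
		// fixed-delay retry; a watch event can still trigger an earlier reconcile
		return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
	}
	return ctrl.Result{}, nil // nothing more to do until the next event or periodic requeue
}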
|
||||
|
||||
func getParametersGeneratedCondition(parametersGenerated bool, message string) argoprojiov1alpha1.ApplicationSetCondition {
|
||||
var paramtersGeneratedCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
if parametersGenerated {
|
||||
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonParametersGenerated,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
} else {
|
||||
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: message,
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonErrorOccurred,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
return paramtersGeneratedCondition
|
||||
}
|
||||
|
||||
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argoprojiov1alpha1.ApplicationSetCondition {
|
||||
var resourceUpToDateCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
if errorOccurred {
|
||||
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: message,
|
||||
Reason: reason,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
} else {
|
||||
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "ApplicationSet up to date",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
}
|
||||
return resourceUpToDateCondition
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argoprojiov1alpha1.ApplicationSet, condition argoprojiov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
|
||||
// check if error occurred during reconcile process
|
||||
errOccurred := condition.Type == argoprojiov1alpha1.ApplicationSetConditionErrorOccurred
|
||||
|
||||
var errOccurredCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
|
||||
if errOccurred {
|
||||
errOccurredCondition = condition
|
||||
} else {
|
||||
errOccurredCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
|
||||
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
|
||||
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
|
||||
|
||||
newConditions := []argoprojiov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
|
||||
|
||||
needToUpdateConditions := false
|
||||
for _, condition := range newConditions {
|
||||
// do nothing if appset already has same condition
|
||||
for _, c := range applicationSet.Status.Conditions {
|
||||
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
|
||||
needToUpdateConditions = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
evaluatedTypes := map[argoprojiov1alpha1.ApplicationSetConditionType]bool{
|
||||
argoprojiov1alpha1.ApplicationSetConditionErrorOccurred: true,
|
||||
argoprojiov1alpha1.ApplicationSetConditionParametersGenerated: true,
|
||||
argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
|
||||
}
|
||||
|
||||
if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 {
|
||||
// fetch updated Application Set object before updating it
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %v", err)
|
||||
}
|
||||
|
||||
applicationSet.Status.SetConditions(
|
||||
newConditions, evaluatedTypes,
|
||||
)
|
||||
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, applicationSet)
|
||||
if err != nil && !apierr.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
|
||||
// generated applications.
|
||||
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argoprojiov1alpha1.ApplicationSet, namespace string) (map[int]error, error) {
|
||||
errorsByIndex := map[int]error{}
|
||||
namesSet := map[string]bool{}
|
||||
for i, app := range desiredApplications {
|
||||
|
||||
if !namesSet[app.Name] {
|
||||
namesSet[app.Name] = true
|
||||
} else {
|
||||
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, namespace); err != nil {
|
||||
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(conditions) > 0 {
|
||||
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return errorsByIndex, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argoprojiov1alpha1.ApplicationSet) time.Duration {
|
||||
var res time.Duration
|
||||
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
|
||||
|
||||
relevantGenerators := generators.GetRelevantGenerators(&requestedGenerator, r.Generators)
|
||||
|
||||
for _, g := range relevantGenerators {
|
||||
t := g.GetRequeueAfter(&requestedGenerator)
|
||||
|
||||
if res == 0 {
|
||||
res = t
|
||||
} else if t != 0 && t < res {
|
||||
res = t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
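// Worked example (illustrative, not in the original source): if the relevant
// generators report requeue intervals of 0 (no periodic refresh), 3m and 10m,
// the loop above yields 3m, the smallest non-zero interval; a result of 0
// means no generator asked for periodic requeueing at all.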
|
||||
|
||||
func getTempApplication(applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) *argov1alpha1.Application {
|
||||
var tmplApplication argov1alpha1.Application
|
||||
tmplApplication.Annotations = applicationSetTemplate.Annotations
|
||||
tmplApplication.Labels = applicationSetTemplate.Labels
|
||||
tmplApplication.Namespace = applicationSetTemplate.Namespace
|
||||
tmplApplication.Name = applicationSetTemplate.Name
|
||||
tmplApplication.Spec = applicationSetTemplate.Spec
|
||||
tmplApplication.Finalizers = applicationSetTemplate.Finalizers
|
||||
|
||||
return &tmplApplication
|
||||
}
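// Note (illustrative, not in the original source): only the template's
// Annotations, Labels, Namespace, Name, Finalizers and Spec are carried over
// into the generated Application; other metadata such as OwnerReferences is
// left unset here and is filled in later (see SetControllerReference in
// createOrUpdateInCluster).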
|
||||
|
||||
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argoprojiov1alpha1.ApplicationSetReasonType, error) {
|
||||
var res []argov1alpha1.Application
|
||||
|
||||
var firstError error
|
||||
var applicationSetReason argoprojiov1alpha1.ApplicationSetReasonType
|
||||
|
||||
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
|
||||
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonApplicationParamsGenerationError
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for _, a := range t {
|
||||
tmplApplication := getTempApplication(a.Template)
|
||||
|
||||
for _, p := range a.Params {
|
||||
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonRenderTemplateParamsError
|
||||
}
|
||||
continue
|
||||
}
|
||||
res = append(res, *app)
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
|
||||
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
|
||||
}
|
||||
|
||||
return res, applicationSetReason, firstError
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
|
||||
// grab the Application object, extract the owner...
|
||||
app := rawObj.(*argov1alpha1.Application)
|
||||
owner := metav1.GetControllerOf(app)
|
||||
if owner == nil {
|
||||
return nil
|
||||
}
|
||||
// ...make sure it's an ApplicationSet...
|
||||
if owner.APIVersion != argoprojiov1alpha1.GroupVersion.String() || owner.Kind != "ApplicationSet" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ...and if so, return it
|
||||
return []string{owner.Name}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&argoprojiov1alpha1.ApplicationSet{}).
|
||||
Owns(&argov1alpha1.Application{}).
|
||||
Watches(
|
||||
&source.Kind{Type: &corev1.Secret{}},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
}).
|
||||
// TODO: also watch Applications and respond on changes if we own them.
|
||||
Complete(r)
|
||||
}
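// Illustrative note, not in the original source: the ".metadata.controller"
// field index registered above is what getCurrentApplications queries through
// client.MatchingFields, Owns(&argov1alpha1.Application{}) re-enqueues the
// owning ApplicationSet whenever a generated Application changes, and the
// Watches on Secrets routes cluster-secret events to ApplicationSets that use
// the cluster generator (see clusterSecretEventHandler).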
|
||||
|
||||
// createOrUpdateInCluster will create / update application resources in the cluster.
|
||||
// - For new applications, it will call create
|
||||
// - For existing applications, it will call update
|
||||
// The function also adds an owner reference to all applications, and uses it to delete them.
|
||||
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
|
||||
var firstError error
|
||||
// Creates or updates the application in appList
|
||||
for _, generatedApp := range desiredApplications {
|
||||
|
||||
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
|
||||
generatedApp.Namespace = applicationSet.Namespace
|
||||
|
||||
found := &argov1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: generatedApp.Name,
|
||||
Namespace: generatedApp.Namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
}
|
||||
|
||||
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
// Preserve specially treated argo cd annotations:
|
||||
// * https://github.com/argoproj/applicationset/issues/180
|
||||
// * https://github.com/argoproj/argo-cd/issues/10500
|
||||
for _, key := range preservedAnnotations {
|
||||
if state, exists := found.ObjectMeta.Annotations[key]; exists {
|
||||
if generatedApp.Annotations == nil {
|
||||
generatedApp.Annotations = map[string]string{}
|
||||
}
|
||||
generatedApp.Annotations[key] = state
|
||||
}
|
||||
}
|
||||
found.ObjectMeta.Annotations = generatedApp.Annotations
|
||||
|
||||
found.ObjectMeta.Finalizers = generatedApp.Finalizers
|
||||
found.ObjectMeta.Labels = generatedApp.Labels
|
||||
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
|
||||
appLog.Logf(log.InfoLevel, "%s Application", action)
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
|
||||
// createInCluster will filter from desiredApplications only the applications that need to be created
|
||||
// Then it will call createOrUpdateInCluster to do the actual create
|
||||
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
|
||||
var createApps []argov1alpha1.Application
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Holds the app names that currently exist in the cluster
|
||||
|
||||
for _, app := range current {
|
||||
m[app.Name] = true
|
||||
}
|
||||
|
||||
// filter applications that are not in m[string]bool (new to the cluster)
|
||||
for _, app := range desiredApplications {
|
||||
_, exists := m[app.Name]
|
||||
|
||||
if !exists {
|
||||
createApps = append(createApps, app)
|
||||
}
|
||||
}
|
||||
|
||||
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
|
||||
// TODO: Should this use the context param?
|
||||
var current argov1alpha1.ApplicationList
|
||||
err := r.Client.List(context.Background(), ¤t, client.MatchingFields{".metadata.controller": applicationSet.Name})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return current.Items, nil
|
||||
}
|
||||
|
||||
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
|
||||
// The function must be called only after all generators have been invoked and the desired applications generated
|
||||
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
|
||||
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
|
||||
// clusterList, err := argoDB.ListClusters(ctx)
|
||||
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, applicationSet.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save current applications to be able to delete the ones that are not in appList
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Holds the app names in appList for the deletion process
|
||||
|
||||
for _, app := range desiredApplications {
|
||||
m[app.Name] = true
|
||||
}
|
||||
|
||||
// Delete apps that are not in m[string]bool
|
||||
var firstError error
|
||||
for _, app := range current {
|
||||
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
|
||||
_, exists := m[app.Name]
|
||||
|
||||
if !exists {
|
||||
|
||||
// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
|
||||
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
|
||||
if err != nil {
|
||||
appLog.WithError(err).Error("failed to update Application")
|
||||
if firstError == nil { // record only the first error encountered
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = r.Client.Delete(ctx, &app)
|
||||
if err != nil {
|
||||
appLog.WithError(err).Error("failed to delete Application")
|
||||
if firstError == nil { // record only the first error encountered
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
|
||||
appLog.Log(log.InfoLevel, "Deleted application")
|
||||
}
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
|
||||
// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
|
||||
func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, app *argov1alpha1.Application, clusterList *argov1alpha1.ClusterList, appLog *log.Entry) error {
|
||||
|
||||
// Only check if the finalizers need to be removed IF there are finalizers to remove
|
||||
if len(app.Finalizers) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var validDestination bool
|
||||
|
||||
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, applicationSet.Namespace); err != nil {
|
||||
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
|
||||
validDestination = false
|
||||
} else {
|
||||
|
||||
// Detect if the destination's server field does not match an existing cluster
|
||||
|
||||
matchingCluster := false
|
||||
for _, cluster := range clusterList.Items {
|
||||
|
||||
// Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable.
|
||||
if app.Spec.Destination.Server != cluster.Server {
|
||||
continue
|
||||
}
|
||||
|
||||
// The name must match, if it is not empty
|
||||
if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
matchingCluster = true
|
||||
break
|
||||
}
|
||||
|
||||
if !matchingCluster {
|
||||
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
|
||||
}
|
||||
|
||||
validDestination = matchingCluster
|
||||
}
|
||||
// If the destination is invalid (for example the cluster is no longer defined), then remove
|
||||
// the application finalizers to avoid triggering Argo CD bug #5817
|
||||
if !validDestination {
|
||||
|
||||
// Filter out the Argo CD finalizer from the finalizer list
|
||||
var newFinalizers []string
|
||||
for _, existingFinalizer := range app.Finalizers {
|
||||
if existingFinalizer != argov1alpha1.ResourcesFinalizerName { // only remove this one
|
||||
newFinalizers = append(newFinalizers, existingFinalizer)
|
||||
}
|
||||
}
|
||||
|
||||
// If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app
|
||||
if len(newFinalizers) != len(app.Finalizers) {
|
||||
app.Finalizers = newFinalizers
|
||||
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name)
|
||||
appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination")
|
||||
|
||||
err := r.Client.Update(ctx, app, &client.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ handler.EventHandler = &clusterSecretEventHandler{}
|
||||
File diff suppressed because it is too large
@@ -1,84 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// clusterSecretEventHandler is used when watching Secrets to check if they are ArgoCD Cluster Secrets, and if so
|
||||
// requeue any related ApplicationSets.
|
||||
type clusterSecretEventHandler struct {
|
||||
//handler.EnqueueRequestForOwner
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.Object)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.ObjectNew)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.Object)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.Object)
|
||||
}
|
||||
|
||||
// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allowing us to easily mock
|
||||
// it for testing purposes.
|
||||
type addRateLimitingInterface interface {
|
||||
Add(item interface{})
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {
|
||||
|
||||
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
|
||||
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
|
||||
return
|
||||
}
|
||||
|
||||
h.Log.WithFields(log.Fields{
|
||||
"namespace": object.GetNamespace(),
|
||||
"name": object.GetName(),
|
||||
}).Info("processing event for cluster secret")
|
||||
|
||||
appSetList := &argoprojiov1alpha1.ApplicationSetList{}
|
||||
err := h.Client.List(context.Background(), appSetList)
|
||||
if err != nil {
|
||||
h.Log.WithError(err).Error("unable to list ApplicationSets")
|
||||
return
|
||||
}
|
||||
|
||||
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
|
||||
for _, appSet := range appSetList.Items {
|
||||
|
||||
foundClusterGenerator := false
|
||||
for _, generator := range appSet.Spec.Generators {
|
||||
if generator.Clusters != nil {
|
||||
foundClusterGenerator = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if foundClusterGenerator {
|
||||
|
||||
// TODO: only queue the AppGenerator if the labels match this cluster
|
||||
req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: appSet.Namespace, Name: appSet.Name}}
|
||||
q.Add(req)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,234 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestClusterEventHandler(t *testing.T) {
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := argoprojiov1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = argov1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
items []argoprojiov1alpha1.ApplicationSet
|
||||
secret corev1.Secret
|
||||
expectedRequests []ctrl.Request
|
||||
}{
|
||||
{
|
||||
name: "no application sets should mean no requests",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "a cluster generator should produce a request",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{{
|
||||
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "multiple cluster generators should produce multiple requests",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set2",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set2"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "non-cluster generator should not match",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "app-set-non-cluster",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-non-argocd-secret",
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
}

	for _, test := range tests {

		t.Run(test.name, func(t *testing.T) {

			appSetList := argoprojiov1alpha1.ApplicationSetList{
				Items: test.items,
			}

			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()

			handler := &clusterSecretEventHandler{
				Client: fakeClient,
				Log:    log.WithField("type", "createSecretEventHandler"),
			}

			mockAddRateLimitingInterface := mockAddRateLimitingInterface{}

			handler.queueRelatedAppGenerators(&mockAddRateLimitingInterface, &test.secret)

			assert.False(t, mockAddRateLimitingInterface.errorOccurred)
			assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests)

		})
	}

}

// Add checks the type, and adds it to the internal list of received additions
func (obj *mockAddRateLimitingInterface) Add(item interface{}) {
	if req, ok := item.(ctrl.Request); ok {
		obj.addedItems = append(obj.addedItems, req)
	} else {
		obj.errorOccurred = true
	}
}

type mockAddRateLimitingInterface struct {
	errorOccurred bool
	addedItems    []ctrl.Request
}
@@ -1,180 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestRequeueAfter(t *testing.T) {
|
||||
mockServer := argoCDServiceMock{}
|
||||
ctx := context.Background()
|
||||
scheme := runtime.NewScheme()
|
||||
err := argoappsetv1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
gvrToListKind := map[schema.GroupVersionResource]string{{
|
||||
Group: "mallard.io",
|
||||
Version: "v1",
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
appClientset := kubefake.NewSimpleClientset()
|
||||
k8sClient := fake.NewClientBuilder().Build()
|
||||
duckType := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v2quack",
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "mightyduck",
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "all-species"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "staging-01",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
|
||||
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build()),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient),
|
||||
}
|
||||
|
||||
nestedGenerators := map[string]generators.Generator{
|
||||
"List": terminalGenerators["List"],
|
||||
"Clusters": terminalGenerators["Clusters"],
|
||||
"Git": terminalGenerators["Git"],
|
||||
"SCMProvider": terminalGenerators["SCMProvider"],
|
||||
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
|
||||
"PullRequest": terminalGenerators["PullRequest"],
|
||||
"Matrix": generators.NewMatrixGenerator(terminalGenerators),
|
||||
"Merge": generators.NewMergeGenerator(terminalGenerators),
|
||||
}
|
||||
|
||||
topLevelGenerators := map[string]generators.Generator{
|
||||
"List": terminalGenerators["List"],
|
||||
"Clusters": terminalGenerators["Clusters"],
|
||||
"Git": terminalGenerators["Git"],
|
||||
"SCMProvider": terminalGenerators["SCMProvider"],
|
||||
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
|
||||
"PullRequest": terminalGenerators["PullRequest"],
|
||||
"Matrix": generators.NewMatrixGenerator(nestedGenerators),
|
||||
"Merge": generators.NewMergeGenerator(nestedGenerators),
|
||||
}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(0),
|
||||
Generators: topLevelGenerators,
|
||||
}
|
||||
|
||||
type args struct {
|
||||
appset *argoappsetv1alpha1.ApplicationSet
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want time.Duration
|
||||
wantErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{name: "Cluster", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{Clusters: &argoappsetv1alpha1.ClusterGenerator{}}},
|
||||
},
|
||||
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
|
||||
{name: "ClusterMergeNested", args: args{&argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
|
||||
{Merge: &argoappsetv1alpha1.MergeGenerator{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
|
||||
Git: &argoappsetv1alpha1.GitGenerator{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
|
||||
{name: "ClusterMatrixNested", args: args{&argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
|
||||
{Matrix: &argoappsetv1alpha1.MatrixGenerator{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
|
||||
Git: &argoappsetv1alpha1.GitGenerator{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
|
||||
{name: "ListGenerator", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{List: &argoappsetv1alpha1.ListGenerator{}}},
|
||||
},
|
||||
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- clusters: {}
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: HEAD
|
||||
path: guestbook
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,57 +0,0 @@
# How the Cluster Decision Resource generator works for clusterDecisionResource
1. The Cluster Decision Resource generator reads a configurable status format:
```yaml
status:
  clusters:
  - name: cluster-01
  - name: cluster-02
```
This is a common status format. Another format that could be read looks like this:
```yaml
status:
  decisions:
  - clusterName: cluster-01
    namespace: cluster-01
  - clusterName: cluster-02
    namespace: cluster-02
```
2. Any resource that has a list of key/value pairs, where the values match ArgoCD cluster names, can be used.
3. The key/value pairs found in each element of the list are available to the template. In addition, `name` and `server` are still available to the template.
4. The service account used by the ApplicationSet controller must have permission to `Get` the resource you want to retrieve the duck-type definition from.
5. A ConfigMap identifies the resource whose status lists the generated ArgoCD clusters. You can use multiple resources by creating one ConfigMap for each of them in the ArgoCD namespace.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-configmap
data:
  apiVersion: group.io/v1
  kind: mykinds
  statusListKey: clusters
  matchKey: name
```
* `apiVersion` - The apiVersion of your resource.
* `kind` - The plural kind of your resource.
* `statusListKey` - The key in your resource's status that holds the list of ArgoCD clusters. Defaults to 'clusters'.
* `matchKey` - The key name found in each element of the cluster list; `name` and `clusterName` are the keys in the examples above.

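As an illustration, a minimal sketch of how the matched key surfaces as a template parameter (the ApplicationSet name and the decision resource `name` below are hypothetical; `my-configmap` refers to the ConfigMap shown above, and the resource is assumed to use `clusterName` as its `matchKey`):
```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: duck-type-sketch           # hypothetical name, for illustration only
spec:
  generators:
  - clusterDecisionResource:
      configMapRef: my-configmap   # the ConfigMap shown above
      name: my-duck-resource       # hypothetical resource holding the status list
  template:
    metadata:
      name: '{{clusterName}}-app'  # matchKey value from each element of the status list
    spec:
      project: default
      source:
        repoURL: https://github.com/argoproj/argocd-example-apps/
        targetRevision: HEAD
        path: guestbook
      destination:
        name: '{{clusterName}}'    # deploy to the matched cluster by name
        namespace: guestbook
```
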
# Applying the example
1. Connect to a cluster with the ApplicationSet controller running.
2. Edit the Role for the ApplicationSet service account, and grant it permission to `list` the `placementdecisions` resources from the `cluster.open-cluster-management.io/v1alpha1` API group:
```yaml
- apiGroups:
  - "cluster.open-cluster-management.io/v1alpha1"
  resources:
  - placementdecisions
  verbs:
  - list
```
3. Apply the controller and the associated ManagedCluster CRDs from:
https://github.com/open-cluster-management/placement
4. Now apply the PlacementDecision and an ApplicationSet:
```bash
kubectl apply -f ./placementdecision.yaml
kubectl apply -f ./configMap.yaml
kubectl apply -f ./ducktype-example.yaml
```
5. For now this won't do anything until you create a controller that populates the `Status.Decisions` array.
@@ -1,11 +0,0 @@
# Generating a Status.Decisions from this CRD requires https://github.com/open-cluster-management/multicloud-operators-placementrule to be deployed
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: ocm-placement
|
||||
data:
|
||||
apiVersion: apps.open-cluster-management.io/v1
|
||||
kind: placementrules
|
||||
statusListKey: decisions
|
||||
matchKey: clusterName
|
||||
@@ -1,27 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: book-import
|
||||
spec:
|
||||
generators:
|
||||
- clusterDecisionResource:
|
||||
configMapRef: ocm-placement
|
||||
name: test-placement
|
||||
requeueAfterSeconds: 30
|
||||
template:
|
||||
metadata:
|
||||
name: '{{clusterName}}-book-import'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: https://github.com/open-cluster-management/application-samples.git
|
||||
targetRevision: HEAD
|
||||
path: book-import
|
||||
destination:
|
||||
name: '{{clusterName}}'
|
||||
namespace: bookimport
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
@@ -1,18 +0,0 @@
|
||||
---
|
||||
apiVersion: apps.open-cluster-management.io/v1
|
||||
kind: PlacementRule
|
||||
metadata:
|
||||
name: test-placement
|
||||
spec:
|
||||
clusterReplicas: 1 # Availability choice, maximum number of clusters to provision at once
|
||||
clusterSelector:
|
||||
matchLabels:
|
||||
'usage': 'development'
|
||||
clusterConditions:
|
||||
- type: ManagedClusterConditionAvailable
|
||||
status: "True"
|
||||
# Below is sample output the generator can consume.
|
||||
status:
|
||||
decisions:
|
||||
- clusterName: cluster-01
|
||||
- clusterName: cluster-02
|
||||
@@ -1,22 +0,0 @@
# This is an example of a typical ApplicationSet which uses the cluster generator.
# An ApplicationSet comprises two stanzas:
# - spec.generators - produce a list of values supplied as arguments to an app template
# - spec.template - an application template, which has been parameterized
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- clusters: {}
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
chart: guestbook
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,33 +0,0 @@
|
||||
# The cluster generator produces an items list from all clusters registered to Argo CD.
|
||||
# It automatically provides the following fields as values to the app template:
|
||||
# - name
|
||||
# - server
|
||||
# - metadata.labels.<key>
|
||||
# - metadata.annotations.<key>
|
||||
# - values.<key>
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
values:
|
||||
project: default
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
labels:
|
||||
environment: '{{metadata.labels.environment}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
chart: guestbook
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,44 +0,0 @@
|
||||
# This example demonstrates the git directory generator, which produces an items list
|
||||
# based on discovery of directories in a git repo matching a specified pattern.
|
||||
# Git generators automatically provide {{path}} and {{path.basename}} as available
|
||||
# variables to the app template.
|
||||
#
|
||||
# Suppose the following git directory structure (note the use of different config tools):
|
||||
#
|
||||
# cluster-deployments
|
||||
# └── add-ons
|
||||
# ├── argo-rollouts
|
||||
# │ ├── all.yaml
|
||||
# │ └── kustomization.yaml
|
||||
# ├── argo-workflows
|
||||
# │ └── install.yaml
|
||||
# ├── grafana
|
||||
# │ ├── Chart.yaml
|
||||
# │ └── values.yaml
|
||||
# └── prometheus-operator
|
||||
# ├── Chart.yaml
|
||||
# └── values.yaml
|
||||
#
|
||||
# The following ApplicationSet would produce four applications (in different namespaces),
|
||||
# using the directory basename as both the namespace and application name.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
directories:
|
||||
- path: add-ons/*
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: http://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,55 +0,0 @@
# This example demonstrates a git file generator which traverses the directory structure of a git
# repository to discover items based on a filename convention. For each file discovered, the
# contents of the discovered files themselves act as the set of inputs to the app template.
#
|
||||
# Suppose the following git directory structure:
|
||||
#
|
||||
# cluster-deployments
|
||||
# ├── apps
|
||||
# │ └── guestbook
|
||||
# │ └── install.yaml
|
||||
# └── cluster-config
|
||||
# ├── engineering
|
||||
# │ ├── dev
|
||||
# │ │ └── config.json
|
||||
# │ └── prod
|
||||
# │ └── config.json
|
||||
# └── finance
|
||||
# ├── dev
|
||||
# │ └── config.json
|
||||
# └── prod
|
||||
# └── config.json
|
||||
#
|
||||
# The discovered files (e.g. config.json) can contain any structured data supplied to the
# generated application, e.g.:
# {
|
||||
# "aws_account": "123456",
|
||||
# "asset_id": "11223344"
|
||||
# "cluster": {
|
||||
# "owner": "Jesse_Suen@intuit.com",
|
||||
# "name": "engineering-dev",
|
||||
# "address": "http://1.2.3.4"
|
||||
# }
|
||||
# }
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: "**/config.json"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: apps/guestbook
|
||||
destination:
|
||||
server: '{{cluster.address}}'
|
||||
namespace: guestbook
|
||||
@@ -1,68 +0,0 @@
|
||||
# This example demonstrates a git file generator which produces its items based on one or
|
||||
# more files referenced in a git repo. The referenced files would contain a json/yaml list of
|
||||
# arbitrary structured objects. Each item of the list would become a set of parameters to a
|
||||
# generated application.
|
||||
#
|
||||
# Suppose the following git directory structure:
|
||||
#
|
||||
# cluster-deployments
|
||||
# ├── apps
|
||||
# │ └── guestbook
|
||||
# │ ├── v1.0
|
||||
# │ │ └── install.yaml
|
||||
# │ └── v2.0
|
||||
# │ └── install.yaml
|
||||
# └── config
|
||||
# └── clusters.json
|
||||
#
# In this example, the `clusters.json` file is a JSON list of structured data:
# [
|
||||
# {
|
||||
# "account": "123456",
|
||||
# "asset_id": "11223344",
|
||||
# "cluster": {
|
||||
# "owner": "Jesse_Suen@intuit.com",
|
||||
# "name": "engineering-dev",
|
||||
# "address": "http://1.2.3.4"
|
||||
# },
|
||||
# "appVersions": {
|
||||
# "prometheus-operator": "v0.38",
|
||||
# "guestbook": "v2.0"
|
||||
# }
|
||||
# },
|
||||
# {
|
||||
# "account": "456789",
|
||||
# "asset_id": "55667788",
|
||||
# "cluster": {
|
||||
# "owner": "Alexander_Matyushentsev@intuit.com",
|
||||
# "name": "engineering-prod",
|
||||
# "address": "http://2.4.6.8"
|
||||
# },
|
||||
# "appVersions": {
|
||||
# "prometheus-operator": "v0.38",
|
||||
# "guestbook": "v1.0"
|
||||
# }
|
||||
# }
|
||||
# ]
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: config/clusters.json
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: apps/guestbook/{{appVersions.guestbook}}
|
||||
destination:
|
||||
server: http://kubernetes.default.svc
|
||||
namespace: guestbook
|
||||
@@ -1,33 +0,0 @@
|
||||
# The list generator specifies a literal list of argument values to the app spec template.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
values:
|
||||
project: dev
|
||||
- cluster: engineering-prod
|
||||
url: https://2.4.6.8
|
||||
values:
|
||||
project: prod
|
||||
- cluster: finance-preprod
|
||||
url: https://9.8.7.6
|
||||
values:
|
||||
project: preprod
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,3 +0,0 @@
# Proposal Examples
This directory contains examples that are not yet implemented.
They are part of the project to indicate future progress, and we welcome any contribution that adds an implementation.
@@ -1,48 +0,0 @@
# For all generators, filters can be applied to reduce the generated items to a smaller subset.
# A powerful set of filter expressions is supported using syntax provided by the
# https://github.com/antonmedv/expr library. Example expressions are demonstrated below.
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
# Match all clusters who meet ALL of the following conditions:
|
||||
# 1. name matches the regex `sales-.*`
|
||||
# 2. environment label is either 'staging' or 'prod'
|
||||
- clusters:
|
||||
filters:
|
||||
- expr: '{{name}} matches "sales-.*"'
|
||||
- expr: '{{metadata.labels.environment}} in [staging, prod]'
|
||||
values:
|
||||
version: '2.0.0'
|
||||
# Filter items from `config/clusters.json` in the `cluster-deployments` git repo,
|
||||
# to only those having the `cluster.enabled == true` property. e.g.:
|
||||
# {
|
||||
# ...
|
||||
# "cluster": {
|
||||
# "enabled": true,
|
||||
# ...
|
||||
# }
|
||||
# }
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: config/clusters.json
|
||||
filters:
|
||||
- expr: '{{cluster.enabled}} == true'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: "{{values.version}}"
|
||||
chart: guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: foo
|
||||
value: "{{metadata.annotations.foo}}"
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,48 +0,0 @@
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
# useful to do this in order to override the spec.template stanza when simple string
# parameterization is insufficient. In the examples below, the generators[].XXX.template is
# a partial definition, which overrides/patches the default template.
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "project"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
path: '{{cluster}}-override'
|
||||
destination: {}
|
||||
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-prod
|
||||
url: https://1.2.3.4
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "project2"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
path: '{{cluster}}-override2'
|
||||
destination: {}
|
||||
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: "project"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,6 +0,0 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-prometheus-operator
|
||||
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,4 +0,0 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 40.5.0
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -1 +0,0 @@
|
||||
# Blank values.yaml
|
||||
@@ -1,6 +0,0 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,23 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-guestbook
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,19 +0,0 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.hosts }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "helm-guestbook.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ template "helm-guestbook.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "helm-guestbook.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "helm-guestbook.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
@@ -1,32 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "helm-guestbook.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "helm-guestbook.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "helm-guestbook.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -1,52 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "helm-guestbook.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
chart: {{ template "helm-guestbook.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "helm-guestbook.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
chart: {{ template "helm-guestbook.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ template "helm-guestbook.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -1,2 +0,0 @@
|
||||
service:
|
||||
type: LoadBalancer
|
||||
@@ -1,45 +0,0 @@
|
||||
# Default values for helm-guestbook.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: gcr.io/heptio-images/ks-guestbook-demo
|
||||
tag: 0.1
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /
|
||||
hosts:
|
||||
- chart-example.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-prometheus-operator
|
||||
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,4 +0,0 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 40.5.0
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -1 +0,0 @@
|
||||
# Blank values.yaml
|
||||
@@ -1,29 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
namespace: argocd
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/git-generator-directory/excludes/cluster-addons/*
|
||||
- exclude: true
|
||||
path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
syncPolicy:
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
@@ -1,27 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
namespace: argocd
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/git-generator-directory/cluster-addons/*
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
syncPolicy:
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,7 +0,0 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"aws_account": "123456",
|
||||
"asset_id": "11223344",
|
||||
"cluster": {
|
||||
"owner": "cluster-admin@company.com",
|
||||
"name": "engineering-dev",
|
||||
"address": "http://1.2.3.4"
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"aws_account": "123456",
|
||||
"asset_id": "11223344",
|
||||
"cluster": {
|
||||
"owner": "cluster-admin@company.com",
|
||||
"name": "engineering-prod",
|
||||
"address": "http://1.2.3.4"
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: "applicationset/examples/git-generator-files-discovery/apps/guestbook"
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
#server: '{{cluster.address}}'
|
||||
namespace: guestbook
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/list-generator/guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,6 +0,0 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,417 +0,0 @@
|
||||
# This is an auto-generated file. DO NOT EDIT
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterworkflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: ClusterWorkflowTemplate
|
||||
listKind: ClusterWorkflowTemplateList
|
||||
plural: clusterworkflowtemplates
|
||||
shortNames:
|
||||
- clusterwftmpl
|
||||
- cwft
|
||||
singular: clusterworkflowtemplate
|
||||
scope: Cluster
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: cronworkflows.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: CronWorkflow
|
||||
listKind: CronWorkflowList
|
||||
plural: cronworkflows
|
||||
shortNames:
|
||||
- cwf
|
||||
- cronwf
|
||||
singular: cronworkflow
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workfloweventbindings.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowEventBinding
|
||||
listKind: WorkflowEventBindingList
|
||||
plural: workfloweventbindings
|
||||
shortNames:
|
||||
- wfeb
|
||||
singular: workfloweventbinding
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflows.argoproj.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.phase
|
||||
description: Status of the workflow
|
||||
name: Status
|
||||
type: string
|
||||
- JSONPath: .status.startedAt
|
||||
description: When the workflow was started
|
||||
format: date-time
|
||||
name: Age
|
||||
type: date
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: Workflow
|
||||
listKind: WorkflowList
|
||||
plural: workflows
|
||||
shortNames:
|
||||
- wf
|
||||
singular: workflow
|
||||
scope: Namespaced
|
||||
subresources: {}
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowTemplate
|
||||
listKind: WorkflowTemplateList
|
||||
plural: workflowtemplates
|
||||
shortNames:
|
||||
- wftmpl
|
||||
singular: workflowtemplate
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- create
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflowtemplates
|
||||
- workflowtemplates/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-server-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
- pods/log
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- watch
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workfloweventbindings
|
||||
- workflowtemplates
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-server-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-server-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: workflow-controller-configmap
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 2746
|
||||
targetPort: 2746
|
||||
selector:
|
||||
app: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: workflow-controller-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: 9090
|
||||
selector:
|
||||
app: workflow-controller
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: argo-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: argo-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
- --namespaced
|
||||
image: argoproj/argocli:v2.12.5
|
||||
name: argo-server
|
||||
ports:
|
||||
- containerPort: 2746
|
||||
name: web
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 2746
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 20
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo-server
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: workflow-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: workflow-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: workflow-controller
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --configmap
|
||||
- workflow-controller-configmap
|
||||
- --executor-image
|
||||
- argoproj/argoexec:v2.12.5
|
||||
- --namespaced
|
||||
command:
|
||||
- workflow-controller
|
||||
image: argoproj/workflow-controller:v2.12.5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 30
|
||||
name: workflow-controller
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: metrics
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-prometheus-operator
|
||||
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,4 +0,0 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 9.4.10
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -1 +0,0 @@
|
||||
# Blank values.yaml
|
||||
@@ -1,33 +0,0 @@
# This example demonstrates combining the git generator with a cluster generator.
# The expected output is one application per git directory per cluster (application_count = git directories * clusters).
#
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-git
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/matrix/cluster-addons/*
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}-{{name}}'
|
||||
spec:
|
||||
project: '{{metadata.labels.environment}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,39 +0,0 @@
# This example demonstrates combining the git generator with a list generator.
# The expected output is one application per git directory per list entry (application_count = git directories * list entries).
#
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: list-git
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/matrix/cluster-addons/*
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
values:
|
||||
project: dev
|
||||
- cluster: engineering-prod
|
||||
url: https://2.4.6.8
|
||||
values:
|
||||
project: prod
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}-{{cluster}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,37 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: list-and-list
|
||||
namespace: argocd
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- list:
|
||||
elements:
|
||||
- values:
|
||||
suffix: '1'
|
||||
- values:
|
||||
suffix: '2'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-{{values.suffix}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,67 +0,0 @@
# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union
# generators cannot contain further-nested matrix or union generators.
#
# The generators are evaluated from most-nested to least-nested. In this case:
# 1. The union generator joins two lists to make 3 parameter sets.
# 2. The inner matrix generator takes the cartesian product of the two lists to make 4 parameter sets.
# 3. The outer matrix generator takes the cartesian product of the 3 union and the 4 inner matrix parameter sets to
#    make 3*4=12 final parameter sets.
# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications.
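# Illustrative expansion of the arithmetic above (assuming the union resolves to the three clusters
# engineering-dev, engineering-prod and engineering-test), the 3 x (2 x 2) = 12 generated Application
# names would be:
#   first-engineering-dev-1,  first-engineering-dev-2,  second-engineering-dev-1,  second-engineering-dev-2,
#   first-engineering-prod-1, first-engineering-prod-2, second-engineering-prod-1, second-engineering-prod-2,
#   first-engineering-test-1, first-engineering-test-2, second-engineering-test-1, second-engineering-test-2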
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: matrix-and-union-in-matrix
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- union:
|
||||
mergeKeys:
|
||||
- cluster
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- cluster: engineering-test
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- matrix:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- values:
|
||||
suffix: '1'
|
||||
- values:
|
||||
suffix: '2'
|
||||
- list:
|
||||
elements:
|
||||
- values:
|
||||
prefix: 'first'
|
||||
- values:
|
||||
prefix: 'second'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{values.prefix}}-{{cluster}}-{{values.suffix}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: '{{path.basename}}'
|
||||
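A minimal Go sketch of the merge-by-key step described in point 1 above, assuming simple map[string]string parameter sets; mergeByKey is an illustrative helper, not the actual union generator implementation.

package main

import "fmt"

// mergeByKey unions slices of parameter sets, treating entries with the same value
// for mergeKey as one logical entry (later values overwrite earlier ones).
func mergeByKey(mergeKey string, lists ...[]map[string]string) []map[string]string {
	order := []string{}
	byKey := map[string]map[string]string{}
	for _, list := range lists {
		for _, params := range list {
			k := params[mergeKey]
			if _, seen := byKey[k]; !seen {
				byKey[k] = map[string]string{}
				order = append(order, k)
			}
			for pk, pv := range params {
				byKey[k][pk] = pv
			}
		}
	}
	res := make([]map[string]string, 0, len(order))
	for _, k := range order {
		res = append(res, byKey[k])
	}
	return res
}

func main() {
	listA := []map[string]string{
		{"cluster": "engineering-dev", "url": "https://kubernetes.default.svc", "values.project": "default"},
		{"cluster": "engineering-prod", "url": "https://kubernetes.default.svc", "values.project": "default"},
	}
	listB := []map[string]string{
		{"cluster": "engineering-dev", "url": "https://kubernetes.default.svc", "values.project": "default"},
		{"cluster": "engineering-test", "url": "https://kubernetes.default.svc", "values.project": "default"},
	}
	merged := mergeByKey("cluster", listA, listB)
	fmt.Println(len(merged)) // 3: dev, prod, test -- combined with the 4 inner matrix sets gives 3*4=12
}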
@@ -1,44 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: merge-clusters-and-list
|
||||
spec:
|
||||
generators:
|
||||
- merge:
|
||||
mergeKeys:
|
||||
- server
|
||||
generators:
|
||||
- clusters:
|
||||
values:
|
||||
kafka: 'true'
|
||||
redis: 'false'
|
||||
# For clusters with a specific label, enable Kafka.
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
use-kafka: 'false'
|
||||
values:
|
||||
kafka: 'false'
|
||||
# For a specific cluster, enable Redis.
|
||||
- list:
|
||||
elements:
|
||||
- server: https://some-specific-cluster
|
||||
values.redis: 'true'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: HEAD
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: kafka
|
||||
value: '{{values.kafka}}'
|
||||
- name: redis
|
||||
value: '{{values.redis}}'
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: default
|
||||
@@ -1,43 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: merge-two-matrixes
|
||||
spec:
|
||||
generators:
|
||||
- merge:
|
||||
mergeKeys:
|
||||
- server
|
||||
- environment
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- clusters:
|
||||
values:
|
||||
replicaCount: '2'
|
||||
- list:
|
||||
elements:
|
||||
- environment: staging
|
||||
namespace: guestbook-non-prod
|
||||
- environment: prod
|
||||
namespace: guestbook
|
||||
- list:
|
||||
elements:
|
||||
- server: https://kubernetes.default.svc
|
||||
environment: staging
|
||||
values.replicaCount: '1'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook-{{environment}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: HEAD
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: replicaCount
|
||||
value: '{{values.replicaCount}}'
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: '{{namespace}}'
|
||||
@@ -1,40 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: myapp
|
||||
spec:
|
||||
generators:
|
||||
- pullRequest:
|
||||
github:
|
||||
# The GitHub organization or user.
|
||||
owner: myorg
|
||||
# The Github repository
|
||||
repo: myrepo
|
||||
# For GitHub Enterprise. (optional)
|
||||
api: https://git.example.com/
|
||||
# Reference to a Secret containing an access token. (optional)
|
||||
tokenRef:
|
||||
secretName: github-token
|
||||
key: token
|
||||
# Labels is used to filter the PRs that you want to target. (optional)
|
||||
labels:
|
||||
- preview
|
||||
template:
|
||||
metadata:
|
||||
name: 'myapp-{{ branch }}-{{ number }}'
|
||||
spec:
|
||||
source:
|
||||
repoURL: 'https://github.com/myorg/myrepo.git'
|
||||
targetRevision: '{{ head_sha }}'
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: "image.tag"
|
||||
value: "pull-{{ head_sha }}"
|
||||
project: default
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: "{{ branch }}-{{ number }}"
|
||||
syncPolicy:
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
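A hedged sketch of how parameters such as branch, number and head_sha from the pull request generator are substituted into the template above; this uses naive string replacement and assumed values, not the controller's real templating.

package main

import (
	"fmt"
	"strings"
)

// render does naive "{{ key }}" substitution; it only illustrates the data flow
// from generator parameters into the Application template fields.
func render(tmpl string, params map[string]string) string {
	out := tmpl
	for k, v := range params {
		out = strings.ReplaceAll(out, "{{ "+k+" }}", v)
	}
	return out
}

func main() {
	params := map[string]string{
		"branch":   "feature-x", // values are assumed for illustration
		"number":   "42",
		"head_sha": "1a2b3c4d",
	}
	fmt.Println(render("myapp-{{ branch }}-{{ number }}", params)) // myapp-feature-x-42
	fmt.Println(render("pull-{{ head_sha }}", params))             // pull-1a2b3c4d
}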
@@ -1,24 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- scmProvider:
|
||||
github:
|
||||
organization: argoproj
|
||||
cloneProtocol: https
|
||||
filters:
|
||||
- repositoryMatch: example-apps
|
||||
template:
|
||||
metadata:
|
||||
name: '{{ repository }}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: '{{ url }}'
|
||||
targetRevision: '{{ branch }}'
|
||||
path: guestbook
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: guestbook
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,7 +0,0 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,7 +0,0 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,36 +0,0 @@
# App templates can also be defined as part of the generator's template stanza. This is
# useful when you need to override the top-level spec.template stanza and simple string
# parameterization is insufficient. In the example below, generators[].XXX.template is
# a partial definition which overrides (patches) the default template.
# (A sketch of this patch-style merge follows the example.)
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
    - list:
        elements:
          - cluster: engineering-dev
            url: https://kubernetes.default.svc
        template:
          metadata: {}
          spec:
            project: "default"
            source:
              targetRevision: HEAD
              repoURL: https://github.com/argoproj/argo-cd.git
              path: 'applicationset/examples/template-override/{{cluster}}-override'
            destination: {}

  template:
    metadata:
      name: '{{cluster}}-guestbook'
    spec:
      project: "default"
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: applicationset/examples/template-override/default
      destination:
        server: '{{url}}'
        namespace: guestbook
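A simplified sketch of the "fill only empty fields" merge applied when the generator-level template is combined with the top-level template (the controller does this with mergo.Merge in mergeGeneratorTemplate, shown later in this diff); the source struct here is a stand-in, not the ApplicationSetTemplate type.

package main

import "fmt"

// source is a simplified stand-in for the ApplicationSetTemplate source stanza.
type source struct {
	RepoURL        string
	TargetRevision string
	Path           string
}

// fillEmpty copies a field from the base template only when the override left it empty,
// which is the effective behaviour of merging the generator template over the spec template.
func fillEmpty(override, base source) source {
	if override.RepoURL == "" {
		override.RepoURL = base.RepoURL
	}
	if override.TargetRevision == "" {
		override.TargetRevision = base.TargetRevision
	}
	if override.Path == "" {
		override.Path = base.Path
	}
	return override
}

func main() {
	base := source{
		RepoURL:        "https://github.com/argoproj/argo-cd.git",
		TargetRevision: "HEAD",
		Path:           "applicationset/examples/template-override/default",
	}
	override := source{ // only Path is set by the generator-level template
		Path: "applicationset/examples/template-override/engineering-dev-override",
	}
	fmt.Printf("%+v\n", fillEmpty(override, base))
	// RepoURL and TargetRevision come from the top-level template; Path comes from the override.
}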
@@ -1,186 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
|
||||
ArgoCDSecretTypeCluster = "cluster"
|
||||
)
|
||||
|
||||
var _ Generator = (*ClusterGenerator)(nil)
|
||||
|
||||
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type ClusterGenerator struct {
|
||||
client.Client
|
||||
ctx context.Context
|
||||
clientset kubernetes.Interface
|
||||
// namespace is the Argo CD namespace
|
||||
namespace string
|
||||
settingsManager *settings.SettingsManager
|
||||
}
|
||||
|
||||
func NewClusterGenerator(c client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator {
|
||||
|
||||
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
|
||||
|
||||
g := &ClusterGenerator{
|
||||
Client: c,
|
||||
ctx: ctx,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
settingsManager: settingsManager,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// GetRequeueAfter never requeues the cluster generator, because the `clusterSecretEventHandler` will requeue the appsets
// when the cluster secrets change
|
||||
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) *argoappsetv1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Clusters.Template
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GenerateParams(
|
||||
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, _ *argoappsetv1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.Clusters == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// Do not include the local cluster in the cluster parameters IF there is a non-empty selector
|
||||
// - Since local clusters do not have secrets, they do not have labels to match against
|
||||
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
clusterSecrets, err := g.getSecretsByClusterName(appSetGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
|
||||
for _, cluster := range clustersFromArgoCD.Items {
|
||||
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
|
||||
secretsFound = append(secretsFound, secretForCluster)
|
||||
|
||||
} else if !ignoreLocalClusters {
|
||||
// If there is no secret for the cluster, it's the local cluster, so handle it here.
|
||||
params := map[string]string{}
|
||||
params["name"] = cluster.Name
|
||||
params["server"] = cluster.Server
|
||||
|
||||
for key, value := range appSetGenerator.Clusters.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
|
||||
log.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
params := map[string]string{}
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = sanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
for key, value := range cluster.ObjectMeta.Annotations {
|
||||
params[fmt.Sprintf("metadata.annotations.%s", key)] = value
|
||||
}
|
||||
for key, value := range cluster.ObjectMeta.Labels {
|
||||
params[fmt.Sprintf("metadata.labels.%s", key)] = value
|
||||
}
|
||||
for key, value := range appSetGenerator.Clusters.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
log.WithField("cluster", cluster.Name).Info("matched cluster secret")
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
|
||||
// List all Clusters:
|
||||
clusterSecretList := &corev1.SecretList{}
|
||||
|
||||
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
|
||||
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := g.Client.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debug("clusters matching labels", "count", len(clusterSecretList.Items))
|
||||
|
||||
res := map[string]corev1.Secret{}
|
||||
|
||||
for _, cluster := range clusterSecretList.Items {
|
||||
clusterName := string(cluster.Data["name"])
|
||||
|
||||
res[clusterName] = cluster
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
// sanitizeName sanitizes the name so that it:
// 1. contains no more than 253 characters
// 2. contains only lowercase alphanumeric characters, '-' or '.'
// 3. starts and ends with an alphanumeric character
func sanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
|
||||
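A small usage sketch of sanitizeName, with the function copied from the generator above; the outputs match the nameNormalized expectations in the test file that follows.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// sanitizeName is copied from the cluster generator above: lowercase, replace invalid
// DNS characters with '-', cap at 253 characters, and trim leading/trailing '-' and '.'.
func sanitizeName(name string) string {
	invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
	maxDNSNameLength := 253

	name = strings.ToLower(name)
	name = invalidDNSNameChars.ReplaceAllString(name, "-")
	if len(name) > maxDNSNameLength {
		name = name[:maxDNSNameLength]
	}
	return strings.Trim(name, "-.")
}

func main() {
	fmt.Println(sanitizeName("production_01/west"))     // production-01-west
	fmt.Println(sanitizeName("-.--CLUSTER/name -./.-")) // cluster-name
}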
@@ -1,254 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"testing"
|
||||
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type possiblyErroringFakeCtrlRuntimeClient struct {
|
||||
client.Client
|
||||
shouldError bool
|
||||
}
|
||||
|
||||
func (p *possiblyErroringFakeCtrlRuntimeClient) List(ctx context.Context, secretList client.ObjectList, opts ...client.ListOption) error {
|
||||
if p.shouldError {
|
||||
return fmt.Errorf("could not list Secrets")
|
||||
}
|
||||
return p.Client.List(ctx, secretList, opts...)
|
||||
}
|
||||
|
||||
func TestGenerateParams(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "staging-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "staging",
|
||||
"org": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "staging",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("staging-01"),
|
||||
"server": []byte("https://staging-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "production-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "production",
|
||||
"org": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "production",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("production_01/west"),
|
||||
"server": []byte("https://production-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
selector metav1.LabelSelector
|
||||
values map[string]string
|
||||
expected []map[string]string
|
||||
// clientError is true if a k8s client error should be simulated
|
||||
clientError bool
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "no label selector",
|
||||
selector: metav1.LabelSelector{},
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
|
||||
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
|
||||
{"name": "in-cluster", "server": "https://kubernetes.default.svc"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "secret type label selector",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
},
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
|
||||
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production or staging",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"production",
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production or staging with match labels",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"production",
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
MatchLabels: map[string]string{
|
||||
"org": "foo",
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"name": "baz",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "simulate client error",
|
||||
selector: metav1.LabelSelector{},
|
||||
values: nil,
|
||||
expected: nil,
|
||||
clientError: true,
|
||||
expectedError: fmt.Errorf("could not list Secrets"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace")
|
||||
|
||||
got, err := clusterGenerator.GenerateParams(&argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{
|
||||
Selector: testCase.selector,
|
||||
Values: testCase.values,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeClusterName(t *testing.T) {
|
||||
t.Run("valid DNS-1123 subdomain name", func(t *testing.T) {
|
||||
assert.Equal(t, "cluster-name", sanitizeName("cluster-name"))
|
||||
})
|
||||
t.Run("invalid DNS-1123 subdomain name", func(t *testing.T) {
|
||||
invalidName := "-.--CLUSTER/name -./.-"
|
||||
assert.Equal(t, "cluster-name", sanitizeName(invalidName))
|
||||
})
|
||||
}
|
||||
@@ -1,229 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*DuckTypeGenerator)(nil)
|
||||
|
||||
// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type DuckTypeGenerator struct {
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
settingsManager *settings.SettingsManager
|
||||
}
|
||||
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
|
||||
|
||||
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
|
||||
|
||||
g := &DuckTypeGenerator{
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
settingsManager: settingsManager,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
|
||||
// Return a requeue default of 3 minutes, if no override is specified.
|
||||
|
||||
if appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.ClusterDecisionResource.Template
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// Not likely to happen
|
||||
if appSetGenerator.ClusterDecisionResource == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Read the configMapRef
|
||||
cm, err := g.clientset.CoreV1().ConfigMaps(g.namespace).Get(g.ctx, appSetGenerator.ClusterDecisionResource.ConfigMapRef, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Extract GVK data for the dynamic client to use
|
||||
versionIdx := strings.Index(cm.Data["apiVersion"], "/")
|
||||
kind := cm.Data["kind"]
|
||||
resourceName := appSetGenerator.ClusterDecisionResource.Name
|
||||
labelSelector := appSetGenerator.ClusterDecisionResource.LabelSelector
|
||||
|
||||
log.WithField("kind.apiVersion", kind+"."+cm.Data["apiVersion"]).Info("Kind.Group/Version Reference")
|
||||
|
||||
// Validate the fields
|
||||
if kind == "" || versionIdx < 1 {
|
||||
log.Warningf("kind=%v, resourceName=%v, versionIdx=%v", kind, resourceName, versionIdx)
|
||||
return nil, fmt.Errorf("There is a problem with the apiVersion, kind or resourceName provided")
|
||||
}
|
||||
|
||||
if (resourceName == "" && labelSelector.MatchLabels == nil && labelSelector.MatchExpressions == nil) ||
|
||||
(resourceName != "" && (labelSelector.MatchExpressions != nil || labelSelector.MatchLabels != nil)) {
|
||||
|
||||
log.Warningf("You must choose either resourceName=%v, labelSelector.matchLabels=%v or labelSelect.matchExpressions=%v", resourceName, labelSelector.MatchLabels, labelSelector.MatchExpressions)
|
||||
return nil, fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator")
|
||||
}
|
||||
|
||||
// Split up the apiVersion
|
||||
group := cm.Data["apiVersion"][0:versionIdx]
|
||||
version := cm.Data["apiVersion"][versionIdx+1:]
|
||||
log.WithField("kind.group.version", kind+"."+group+"/"+version).Debug("decoded Ref")
|
||||
|
||||
duckGVR := schema.GroupVersionResource{Group: group, Version: version, Resource: kind}
|
||||
|
||||
listOptions := metav1.ListOptions{}
|
||||
if resourceName == "" {
|
||||
listOptions.LabelSelector = metav1.FormatLabelSelector(&labelSelector)
|
||||
log.WithField("listOptions.LabelSelector", listOptions.LabelSelector).Info("selection type")
|
||||
} else {
|
||||
listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", resourceName).String()
|
||||
//metav1.Convert_fields_Selector_To_string(fields.).Sprintf("metadata.name=%s", resourceName)
|
||||
log.WithField("listOptions.FieldSelector", listOptions.FieldSelector).Info("selection type")
|
||||
}
|
||||
|
||||
duckResources, err := g.dynClient.Resource(duckGVR).Namespace(g.namespace).List(g.ctx, listOptions)
|
||||
|
||||
if err != nil {
|
||||
log.WithField("GVK", duckGVR).Warning("resources were not found")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(duckResources.Items) == 0 {
|
||||
log.Warning("no resource found, make sure you clusterDecisionResource is defined correctly")
|
||||
return nil, fmt.Errorf("no clusterDecisionResources found")
|
||||
}
|
||||
|
||||
// Override the duck type in the status of the resource
|
||||
statusListKey := "clusters"
|
||||
|
||||
matchKey := cm.Data["matchKey"]
|
||||
|
||||
if cm.Data["statusListKey"] != "" {
|
||||
statusListKey = cm.Data["statusListKey"]
|
||||
}
|
||||
if matchKey == "" {
|
||||
log.WithField("matchKey", matchKey).Warning("matchKey not found in " + cm.Name)
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
clusterDecisions := []interface{}{}
|
||||
|
||||
// Build the decision slice
|
||||
for _, duckResource := range duckResources.Items {
|
||||
log.WithField("duckResourceName", duckResource.GetName()).Debug("found resource")
|
||||
|
||||
if duckResource.Object["status"] == nil || len(duckResource.Object["status"].(map[string]interface{})) == 0 {
|
||||
log.Warningf("clusterDecisionResource: %s, has no status", duckResource.GetName())
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("duckResourceStatus", duckResource.Object["status"]).Debug("found resource")
|
||||
|
||||
clusterDecisions = append(clusterDecisions, duckResource.Object["status"].(map[string]interface{})[statusListKey].([]interface{})...)
|
||||
|
||||
}
|
||||
log.Infof("Number of decisions found: %v", len(clusterDecisions))
|
||||
|
||||
// Read this outside the loop to improve performance
|
||||
argoClusters := clustersFromArgoCD.Items
|
||||
|
||||
if len(clusterDecisions) > 0 {
|
||||
for _, cluster := range clusterDecisions {
|
||||
|
||||
// generated instance of cluster params
|
||||
params := map[string]string{}
|
||||
|
||||
log.Infof("cluster: %v", cluster)
|
||||
matchValue := cluster.(map[string]interface{})[matchKey]
|
||||
if matchValue == nil || matchValue.(string) == "" {
|
||||
log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]interface{}))
|
||||
continue
|
||||
}
|
||||
|
||||
strMatchValue := matchValue.(string)
|
||||
log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD")
|
||||
|
||||
found := false
|
||||
|
||||
for _, argoCluster := range argoClusters {
|
||||
if argoCluster.Name == strMatchValue {
|
||||
|
||||
log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD")
|
||||
params["name"] = argoCluster.Name
|
||||
params["server"] = argoCluster.Server
|
||||
|
||||
found = true
|
||||
break // Stop looking
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if !found {
|
||||
log.WithField(matchKey, strMatchValue).Warning("unmatched cluster in ArgoCD")
|
||||
continue
|
||||
}
|
||||
|
||||
for key, value := range cluster.(map[string]interface{}) {
|
||||
params[key] = value.(string)
|
||||
}
|
||||
|
||||
for key, value := range appSetGenerator.ClusterDecisionResource.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
} else {
|
||||
log.Warningf("clusterDecisionResource status." + statusListKey + " missing")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
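A stripped-down sketch of how the generator above extracts matchKey values from status.&lt;statusListKey&gt; of the duck-typed resource, using a plain map instead of the dynamic client's unstructured object; extractMatchValues is an illustrative helper, not the actual implementation.

package main

import "fmt"

// extractMatchValues pulls matchKey out of every entry of status[statusListKey],
// mirroring how the ClusterDecisionResource generator builds its decision list.
func extractMatchValues(status map[string]interface{}, statusListKey, matchKey string) []string {
	res := []string{}
	list, ok := status[statusListKey].([]interface{})
	if !ok {
		return res
	}
	for _, entry := range list {
		decision, ok := entry.(map[string]interface{})
		if !ok {
			continue
		}
		if v, ok := decision[matchKey].(string); ok && v != "" {
			res = append(res, v)
		}
	}
	return res
}

func main() {
	// Shaped like the Duck resource status in the test file that follows.
	status := map[string]interface{}{
		"decisions": []interface{}{
			map[string]interface{}{"clusterName": "staging-01"},
			map[string]interface{}{"clusterName": "production-01"},
		},
	}
	fmt.Println(extractMatchValues(status, "decisions", "clusterName"))
	// [staging-01 production-01] -- each name is then matched against the clusters registered in Argo CD.
}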
@@ -1,315 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
const resourceApiVersion = "mallard.io/v1"
|
||||
const resourceKind = "ducks"
|
||||
const resourceName = "quak"
|
||||
|
||||
func TestGenerateParamsForDuckType(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "staging-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "staging",
|
||||
"org": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "staging",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("staging-01"),
|
||||
"server": []byte("https://staging-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "production-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "production",
|
||||
"org": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "production",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("production-01"),
|
||||
"server": []byte("https://production-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
|
||||
duckType := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "all-species"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "staging-01",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
duckTypeProdOnly := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "spotted"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
duckTypeEmpty := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "canvasback"},
|
||||
},
|
||||
"status": map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
configMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-configmap",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": resourceKind,
|
||||
"statusListKey": "decisions",
|
||||
"matchKey": "clusterName",
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
configMapRef string
|
||||
resourceName string
|
||||
labelSelector metav1.LabelSelector
|
||||
resource *unstructured.Unstructured
|
||||
values map[string]string
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "no duck resource",
|
||||
resourceName: "",
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
|
||||
},
|
||||
/*** This does not work with the FAKE runtime client, fieldSelectors are broken.
|
||||
{
|
||||
name: "invalid name for duck resource",
|
||||
resourceName: resourceName + "-different",
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("duck.mallard.io \"quak\" not found"),
|
||||
},
|
||||
***/
|
||||
{
|
||||
name: "duck type generator resourceName",
|
||||
resourceName: resourceName,
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only",
|
||||
resourceName: resourceName,
|
||||
resource: duckTypeProdOnly,
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type empty status",
|
||||
resourceName: resourceName,
|
||||
resource: duckTypeEmpty,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type empty status labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "canvasback"}},
|
||||
resource: duckTypeEmpty,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "all-species"}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
resource: duckTypeProdOnly,
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "spotted"}},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator labelSelector.matchExpressions",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "duck",
|
||||
Operator: "In",
|
||||
Values: []string{"all-species", "marbled"},
|
||||
},
|
||||
}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator resourceName and labelSelector.matchExpressions",
|
||||
resourceName: resourceName,
|
||||
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "duck",
|
||||
Operator: "In",
|
||||
Values: []string{"all-species", "marbled"},
|
||||
},
|
||||
}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(append(runtimeClusters, configMap)...)
|
||||
|
||||
gvrToListKind := map[schema.GroupVersionResource]string{{
|
||||
Group: "mallard.io",
|
||||
Version: "v1",
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
var duckTypeGenerator = NewDuckTypeGenerator(context.Background(), fakeDynClient, appClientset, "namespace")
|
||||
|
||||
got, err := duckTypeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
ClusterDecisionResource: &argoprojiov1alpha1.DuckTypeGenerator{
|
||||
ConfigMapRef: "my-configmap",
|
||||
Name: testCase.resourceName,
|
||||
LabelSelector: testCase.labelSelector,
|
||||
Values: testCase.values,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
type TransformResult struct {
|
||||
Params []map[string]string
|
||||
Template argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
// Transform converts a spec generator into a list of paramSets and a template
|
||||
func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet) ([]TransformResult, error) {
|
||||
res := []TransformResult{}
|
||||
var firstError error
|
||||
|
||||
generators := GetRelevantGenerators(&requestedGenerator, allGenerators)
|
||||
for _, g := range generators {
|
||||
// We call mergeGeneratorTemplate first because GenerateParams might be more costly, so we want to fail fast if there is an error
|
||||
mergedTemplate, err := mergeGeneratorTemplate(g, &requestedGenerator, baseTemplate)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Error("error generating params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
params, err := g.GenerateParams(&requestedGenerator, appSet)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Error("error generating params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
res = append(res, TransformResult{
|
||||
Params: params,
|
||||
Template: mergedTemplate,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
return res, firstError
|
||||
|
||||
}
|
||||
|
||||
func GetRelevantGenerators(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, generators map[string]Generator) []Generator {
|
||||
var res []Generator
|
||||
|
||||
v := reflect.Indirect(reflect.ValueOf(requestedGenerator))
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if !field.CanInterface() {
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.ValueOf(field.Interface()).IsNil() {
|
||||
res = append(res, generators[v.Type().Field(i).Name])
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) (argoprojiov1alpha1.ApplicationSetTemplate, error) {
|
||||
|
||||
// Make a copy of the value from `GetTemplate()` before merge, rather than copying directly into
|
||||
// the provided parameter (which will touch the original resource object returned by client-go)
|
||||
dest := g.GetTemplate(requestedGenerator).DeepCopy()
|
||||
|
||||
err := mergo.Merge(dest, applicationSetTemplate)
|
||||
|
||||
return *dest, err
|
||||
}
|
||||
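A self-contained sketch of the reflection pattern used by GetRelevantGenerators above, with a hypothetical two-field struct standing in for ApplicationSetGenerator.

package main

import (
	"fmt"
	"reflect"
)

// requestedGenerator is a hypothetical stand-in for ApplicationSetGenerator:
// the fields that are non-nil identify which generators were requested.
type requestedGenerator struct {
	List     *struct{}
	Clusters *struct{}
}

// setFieldNames returns the names of all non-nil pointer fields, which is how
// GetRelevantGenerators decides which registered Generator implementations to run.
func setFieldNames(g interface{}) []string {
	names := []string{}
	v := reflect.Indirect(reflect.ValueOf(g))
	for i := 0; i < v.NumField(); i++ {
		field := v.Field(i)
		if !field.CanInterface() {
			continue
		}
		if !reflect.ValueOf(field.Interface()).IsNil() {
			names = append(names, v.Type().Field(i).Name)
		}
	}
	return names
}

func main() {
	g := &requestedGenerator{List: &struct{}{}}
	fmt.Println(setFieldNames(g)) // [List]
}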
@@ -1,227 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jeremywohl/flatten"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*GitGenerator)(nil)
|
||||
|
||||
type GitGenerator struct {
|
||||
repos services.Repos
|
||||
}
|
||||
|
||||
func NewGitGenerator(repos services.Repos) Generator {
|
||||
g := &GitGenerator{
|
||||
repos: repos,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Git.Template
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
|
||||
// Return a requeue default of 3 minutes, if no default is specified.
|
||||
|
||||
if appSetGenerator.Git.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.Git.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.Git == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
var err error
|
||||
var res []map[string]string
|
||||
if appSetGenerator.Git.Directories != nil {
|
||||
res, err = g.generateParamsForGitDirectories(appSetGenerator)
|
||||
} else if appSetGenerator.Git.Files != nil {
|
||||
res, err = g.generateParamsForGitFiles(appSetGenerator)
|
||||
} else {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) ([]map[string]string, error) {
|
||||
|
||||
// Directories, not files
|
||||
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"allPaths": allPaths,
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
}).Info("applications result from the repo service")
|
||||
|
||||
requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths)
|
||||
|
||||
res := g.generateParamsFromApps(requestedApps, appSetGenerator)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) ([]map[string]string, error) {
|
||||
|
||||
// Get all files that match the requested path string, removing duplicates
|
||||
allFiles := make(map[string][]byte)
|
||||
for _, requestedPath := range appSetGenerator.Git.Files {
|
||||
files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for filePath, content := range files {
|
||||
allFiles[filePath] = content
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the unduplicated map into a list, and sort by path to ensure a deterministic
|
||||
// processing order in the subsequent step
|
||||
allPaths := []string{}
|
||||
for path := range allFiles {
|
||||
allPaths = append(allPaths, path)
|
||||
}
|
||||
sort.Strings(allPaths)
|
||||
|
||||
// Generate params from each path, and return
|
||||
res := []map[string]string{}
|
||||
for _, path := range allPaths {
|
||||
|
||||
// A JSON / YAML file path can contain multiple sets of parameters (i.e. it is an array)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
|
||||
}
|
||||
|
||||
for index := range paramsArray {
|
||||
res = append(res, paramsArray[index])
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte) ([]map[string]string, error) {
|
||||
objectsFound := []map[string]interface{}{}
|
||||
|
||||
// First, we attempt to parse as an array
|
||||
err := yaml.Unmarshal(fileContent, &objectsFound)
|
||||
if err != nil {
|
||||
// If unable to parse as an array, attempt to parse as a single object
|
||||
singleObj := make(map[string]interface{})
|
||||
err = yaml.Unmarshal(fileContent, &singleObj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse file: %v", err)
|
||||
}
|
||||
objectsFound = append(objectsFound, singleObj)
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
// Flatten all objects found, and return them
|
||||
for _, objectFound := range objectsFound {
|
||||
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params := map[string]string{}
|
||||
for k, v := range flat {
|
||||
params[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
params["path"] = path.Dir(filePath)
|
||||
params["path.basename"] = path.Base(params["path"])
|
||||
params["path.filename"] = path.Base(filePath)
|
||||
params["path.basenameNormalized"] = sanitizeName(path.Base(params["path"]))
|
||||
params["path.filenameNormalized"] = sanitizeName(path.Base(params["path.filename"]))
|
||||
for k, v := range strings.Split(params["path"], "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string {
|
||||
res := []string{}
|
||||
for _, appPath := range allPaths {
|
||||
appInclude := false
|
||||
appExclude := false
|
||||
// Iterate over each appPath and check whether the directories object has a requestedPath that matches the appPath
|
||||
for _, requestedPath := range Directories {
|
||||
match, err := path.Match(requestedPath.Path, appPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedPath", requestedPath).
|
||||
WithField("appPath", appPath).Error("error while matching appPath to requestedPath")
|
||||
continue
|
||||
}
|
||||
if match && !requestedPath.Exclude {
|
||||
appInclude = true
|
||||
}
|
||||
if match && requestedPath.Exclude {
|
||||
appExclude = true
|
||||
}
|
||||
}
|
||||
// Whenever there is a path with exclude: true, it won't be included, even if it is matched by a different path pattern
|
||||
if appInclude && !appExclude {
|
||||
res = append(res, appPath)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argoprojiov1alpha1.ApplicationSetGenerator) []map[string]string {
|
||||
// TODO: At some point, the applicationSetGenerator param should be used
|
||||
|
||||
res := make([]map[string]string, len(requestedApps))
|
||||
for i, a := range requestedApps {
|
||||
|
||||
params := make(map[string]string, 2)
|
||||
params["path"] = a
|
||||
params["path.basename"] = path.Base(a)
|
||||
params["path.basenameNormalized"] = sanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(params["path"], "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
res[i] = params
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
@@ -1,498 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// type clientSet struct {
|
||||
// RepoServerServiceClient apiclient.RepoServerServiceClient
|
||||
// }
|
||||
|
||||
// func (c *clientSet) NewRepoServerClient() (io.Closer, apiclient.RepoServerServiceClient, error) {
|
||||
// return io.NewCloser(func() error { return nil }), c.RepoServerServiceClient, nil
|
||||
// }
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func Test_generateParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, []map[string]string{
|
||||
{
|
||||
"foo.bar": "baz",
|
||||
"path": "path/dir",
|
||||
"path.basename": "dir",
|
||||
"path.filename": "file_name.yaml",
|
||||
"path.basenameNormalized": "dir",
|
||||
"path.filenameNormalized": "file-name.yaml",
|
||||
"path[0]": "path",
|
||||
"path[1]": "dir",
|
||||
},
|
||||
}, params)
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "path[0]": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "path[0]": "app2"},
|
||||
{"path": "app_3", "path.basename": "app_3", "path.basenameNormalized": "app-3", "path[0]": "app_3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"p1/app2",
|
||||
"p1/p2/app3",
|
||||
"p1/p2/p3/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "p1/app2", "path.basename": "app2", "path[0]": "p1", "path[1]": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p1/p2/app3", "path.basename": "app3", "path[0]": "p1", "path[1]": "p2", "path[2]": "app3", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths with Exclude",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*", Exclude: true}, {Path: "*"}, {Path: "*/*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"p1/app2",
|
||||
"p1/app3",
|
||||
"p2/app3",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path[0]": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path[1]": "app3", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Expecting same exclude behavior with different order",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}, {Path: "p1/*", Exclude: true}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"p1/app2",
|
||||
"p1/app3",
|
||||
"p2/app3",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path[0]": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path[1]": "app3", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles empty response from repo server",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles error from repo server",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{},
|
||||
repoError: fmt.Errorf("error"),
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("error"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], nil)
|
||||
|
||||
if testCaseCopy.expectedError != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromFiles(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
// files is the list of paths/globs to match
|
||||
files []argoprojiov1alpha1.GitFileGeneratorItem
|
||||
// repoFileContents maps repo path to the literal contents of that path
|
||||
repoFileContents map[string][]byte
|
||||
// if repoPathsError is non-nil, the call to GetPaths(...) will return this error value
|
||||
repoPathsError error
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow: create params from git files",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "production",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
},
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}`),
|
||||
"cluster-config/staging/config.json": []byte(`{
|
||||
"cluster": {
|
||||
"owner": "foo.bar@example.com",
|
||||
"name": "staging",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
}
|
||||
}`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "foo.bar@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/staging",
|
||||
"path.basename": "staging",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "staging",
|
||||
"path.basenameNormalized": "staging",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles error during getting repo paths",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{},
|
||||
repoPathsError: fmt.Errorf("paths error"),
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("paths error"),
|
||||
},
|
||||
{
|
||||
name: "test invalid JSON file returns error",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`invalid json file`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`
|
||||
[
|
||||
{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "production",
|
||||
"address": "https://kubernetes.default.svc",
|
||||
"inner": {
|
||||
"one" : "two"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "staging",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
}
|
||||
}
|
||||
]`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"cluster.inner.one": "two",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Test YAML flow",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`
|
||||
cluster:
|
||||
owner: john.doe@example.com
|
||||
name: production
|
||||
address: https://kubernetes.default.svc
|
||||
key1: val1
|
||||
key2:
|
||||
key2_1: val2_1
|
||||
key2_2:
|
||||
key2_2_1: val2_2_1
|
||||
`),
|
||||
"cluster-config/staging/config.yaml": []byte(`
|
||||
cluster:
|
||||
owner: foo.bar@example.com
|
||||
name: staging
|
||||
address: https://kubernetes.default.svc
|
||||
`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "foo.bar@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/staging",
|
||||
"path.basename": "staging",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "staging",
|
||||
"path.basenameNormalized": "staging",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "test YAML array",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`
|
||||
- cluster:
|
||||
owner: john.doe@example.com
|
||||
name: production
|
||||
address: https://kubernetes.default.svc
|
||||
inner:
|
||||
one: two
|
||||
- cluster:
|
||||
owner: john.doe@example.com
|
||||
name: staging
|
||||
address: https://kubernetes.default.svc`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"cluster.inner.one": "two",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Files: testCaseCopy.files,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], nil)
|
||||
fmt.Println(got, err)
|
||||
|
||||
if testCaseCopy.expectedError != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
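// Illustrative sketch (hypothetical helper, not part of the tests above):
// approximates the key flattening those tests exercise, where nested objects
// collapse into dotted keys ("cluster.owner") and non-string leaves are
// stringified ("key3": 123 becomes "123").
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON/YAML document and writes dotted keys into out.
func flatten(prefix string, in map[string]interface{}, out map[string]string) {
	for k, v := range in {
		key := k
		if prefix != "" {
			key = prefix + "." + k
		}
		if child, ok := v.(map[string]interface{}); ok {
			flatten(key, child, out)
			continue
		}
		out[key] = fmt.Sprintf("%v", v)
	}
}

func main() {
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(`{"cluster":{"owner":"john.doe@example.com"},"key3":123}`), &doc); err != nil {
		panic(err)
	}
	out := map[string]string{}
	flatten("", doc, out)
	fmt.Println(out) // map[cluster.owner:john.doe@example.com key3:123]
}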
@@ -1,32 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// Generator defines the interface implemented by all ApplicationSet generators.
|
||||
type Generator interface {
|
||||
// GenerateParams interprets the ApplicationSet and generates all relevant parameters for the application template.
|
||||
// The expected / desired list of parameters is returned; it will then be rendered and reconciled
|
||||
// against the current state of the Applications in the cluster.
|
||||
GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error)
|
||||
|
||||
// GetRequeueAfter is how the generator can control when the next reconcile loop runs.
|
||||
// If there is more than one generator, the time will be the minimum of the times.
|
||||
// If NoRequeueAfter is returned, it will be ignored.
|
||||
GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration
|
||||
|
||||
// GetTemplate returns the inline template from the spec if there is any, or an empty object otherwise
|
||||
GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
var EmptyAppSetGeneratorError = fmt.Errorf("ApplicationSet is empty")
|
||||
var NoRequeueAfter time.Duration
|
||||
|
||||
// DefaultRequeueAfterSeconds is used when GetRequeueAfter is not specified; it is the default time to wait before the next reconcile loop.
|
||||
const (
|
||||
DefaultRequeueAfterSeconds = 3 * time.Minute
|
||||
)
|
||||
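// Illustrative sketch: a minimal, hypothetical implementation of the Generator
// interface defined above (staticGenerator is not a real generator in the
// codebase). It returns a fixed list of param sets and never asks for a requeue.
package generators

import (
	"time"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

var _ Generator = (*staticGenerator)(nil)

type staticGenerator struct {
	params []map[string]string
}

func (g *staticGenerator) GenerateParams(_ *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
	return g.params, nil
}

func (g *staticGenerator) GetRequeueAfter(_ *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
	return NoRequeueAfter // never force an earlier reconcile
}

func (g *staticGenerator) GetTemplate(_ *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
	return &argoprojiov1alpha1.ApplicationSetTemplate{}
}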
@@ -1,74 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*ListGenerator)(nil)
|
||||
|
||||
type ListGenerator struct {
|
||||
}
|
||||
|
||||
func NewListGenerator() Generator {
|
||||
g := &ListGenerator{}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.List.Template
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.List == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
res := make([]map[string]string, len(appSetGenerator.List.Elements))
|
||||
|
||||
for i, tmpItem := range appSetGenerator.List.Elements {
|
||||
params := map[string]string{}
|
||||
var element map[string]interface{}
|
||||
err := json.Unmarshal(tmpItem.Raw, &element)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshling list element %v", err)
|
||||
}
|
||||
|
||||
for key, value := range element {
|
||||
if key == "values" {
|
||||
values, ok := (value).(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing values map")
|
||||
}
|
||||
for k, v := range values {
|
||||
value, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing value as string %v", err)
|
||||
}
|
||||
params[fmt.Sprintf("values.%s", k)] = value
|
||||
}
|
||||
} else {
|
||||
v, ok := value.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing value as string %v", err)
|
||||
}
|
||||
params[key] = v
|
||||
}
|
||||
}
|
||||
|
||||
res[i] = params
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
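// Illustrative sketch (standalone, hypothetical): how a single list element is
// flattened into a param set by the ListGenerator above. Top-level string
// fields are copied as-is; a nested "values" map is prefixed with "values.".
// The example element below is made up.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"cluster": "prod", "url": "https://example.com", "values": {"foo": "bar"}}`)

	var element map[string]interface{}
	if err := json.Unmarshal(raw, &element); err != nil {
		panic(err)
	}

	params := map[string]string{}
	for key, value := range element {
		if key == "values" {
			// the real generator checks these type assertions and returns an error instead of panicking
			for k, v := range value.(map[string]interface{}) {
				params["values."+k] = v.(string)
			}
			continue
		}
		params[key] = value.(string)
	}

	fmt.Println(params) // map[cluster:prod url:https://example.com values.foo:bar]
}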
@@ -1,39 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestGenerateListParams(t *testing.T) {
|
||||
testCases := []struct {
|
||||
elements []apiextensionsv1.JSON
|
||||
expected []map[string]string
|
||||
}{
|
||||
{
|
||||
elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}},
|
||||
expected: []map[string]string{{"cluster": "cluster", "url": "url"}},
|
||||
}, {
|
||||
elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}},
|
||||
expected: []map[string]string{{"cluster": "cluster", "url": "url", "values.foo": "bar"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
var listGenerator = NewListGenerator()
|
||||
|
||||
got, err := listGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: testCase.elements,
|
||||
}}, nil)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,158 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*MatrixGenerator)(nil)
|
||||
|
||||
var (
|
||||
ErrMoreThanTwoGenerators      = fmt.Errorf("found more than two generators, Matrix supports only two")
|
||||
ErrLessThanTwoGenerators      = fmt.Errorf("found less than two generators, Matrix supports only two")
|
||||
ErrMoreThenOneInnerGenerators = fmt.Errorf("found more than one generator in matrix.Generators")
|
||||
)
|
||||
|
||||
type MatrixGenerator struct {
|
||||
// The inner generators supported by the matrix generator (cluster, git, list...)
|
||||
supportedGenerators map[string]Generator
|
||||
}
|
||||
|
||||
func NewMatrixGenerator(supportedGenerators map[string]Generator) Generator {
|
||||
m := &MatrixGenerator{
|
||||
supportedGenerators: supportedGenerators,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator.Matrix == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Matrix.Generators) < 2 {
|
||||
return nil, ErrLessThanTwoGenerators
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Matrix.Generators) > 2 {
|
||||
return nil, ErrMoreThanTwoGenerators
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
g0, err := m.getParams(appSetGenerator.Matrix.Generators[0], appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, a := range g0 {
|
||||
for _, b := range g1 {
|
||||
val, err := utils.CombineStringMaps(a, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, val)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: appSetBaseGenerator.List,
|
||||
Clusters: appSetBaseGenerator.Clusters,
|
||||
Git: appSetBaseGenerator.Git,
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
appSet)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("child generator returned an error on parameter generation: %v", err)
|
||||
}
|
||||
|
||||
if len(t) == 0 {
|
||||
return nil, fmt.Errorf("child generator generated no parameters")
|
||||
}
|
||||
|
||||
if len(t) > 1 {
|
||||
return nil, ErrMoreThenOneInnerGenerators
|
||||
}
|
||||
|
||||
return t[0].Params, nil
|
||||
}
|
||||
|
||||
const maxDuration time.Duration = 1<<63 - 1
|
||||
|
||||
func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
res := maxDuration
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Matrix.Generators {
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
for _, g := range generators {
|
||||
temp := g.GetRequeueAfter(base)
|
||||
if temp < res && temp != NoRequeueAfter {
|
||||
found = true
|
||||
res = temp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
return res
|
||||
} else {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func getMatrixGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MatrixGenerator, error) {
|
||||
if r.Matrix == nil {
|
||||
return nil, nil
|
||||
}
|
||||
matrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(r.Matrix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return matrix.ToMatrixGenerator(), nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Matrix.Template
|
||||
}
|
||||
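// Illustrative sketch (hypothetical helper): the cartesian product performed by
// the matrix generator above, combining every param set from the first child
// generator with every param set from the second.
package main

import "fmt"

func cartesian(a, b []map[string]string) []map[string]string {
	res := []map[string]string{}
	for _, x := range a {
		for _, y := range b {
			combined := map[string]string{}
			for k, v := range x {
				combined[k] = v
			}
			for k, v := range y {
				// note: the real generator (via utils.CombineStringMaps) returns an
				// error when the two sets define the same key with different values
				combined[k] = v
			}
			res = append(res, combined)
		}
	}
	return res
}

func main() {
	a := []map[string]string{{"a": "1"}, {"a": "2"}}
	b := []map[string]string{{"b": "1"}, {"b": "2"}}
	fmt.Println(len(cartesian(a, b))) // 4 combined param sets
}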
@@ -1,313 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestMatrixGenerate(t *testing.T) {
|
||||
|
||||
gitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
}
|
||||
|
||||
listGenerator := &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
expectedErr error
|
||||
expected []map[string]string
|
||||
}{
|
||||
{
|
||||
name: "happy flow - generate params",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "cluster": "Cluster", "url": "Url"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "cluster": "Cluster", "url": "Url"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "happy flow - generate params from two lists",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"a": "1"}`)},
|
||||
{Raw: []byte(`{"a": "2"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"b": "1"}`)},
|
||||
{Raw: []byte(`{"b": "2"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"a": "1", "b": "1"},
|
||||
{"a": "1", "b": "2"},
|
||||
{"a": "2", "b": "1"},
|
||||
{"a": "2", "b": "2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns error if there is less than two base generators",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrLessThanTwoGenerators,
|
||||
},
|
||||
{
|
||||
name: "returns error if there is more than two base generators",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrMoreThanTwoGenerators,
|
||||
},
|
||||
{
|
||||
name: "returns error if there is more than one inner generator in the first base generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrMoreThenOneInnerGenerators,
|
||||
},
|
||||
{
|
||||
name: "returns error if there is more than one inner generator in the second base generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
{
|
||||
Git: gitGenerator,
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
expectedErr: ErrMoreThenOneInnerGenerators,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
mock := &generatorMock{}
|
||||
appSet := &argoprojiov1alpha1.ApplicationSet{}
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
}
|
||||
mock.On("GenerateParams", &gitGeneratorSpec, appSet).Return([]map[string]string{
|
||||
{
|
||||
"path": "app1",
|
||||
"path.basename": "app1",
|
||||
"path.basenameNormalized": "app1",
|
||||
},
|
||||
{
|
||||
"path": "app2",
|
||||
"path.basename": "app2",
|
||||
"path.basenameNormalized": "app2",
|
||||
},
|
||||
}, nil)
|
||||
|
||||
mock.On("GetTemplate", &gitGeneratorSpec).
|
||||
Return(&argoprojiov1alpha1.ApplicationSetTemplate{})
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
got, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}, appSet)
|
||||
|
||||
if testCaseCopy.expectedErr != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedErr.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
gitGenerator := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
}
|
||||
|
||||
listGenerator := &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
pullRequestGenerator := &argoprojiov1alpha1.PullRequestGenerator{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
gitGetRequeueAfter time.Duration
|
||||
expected time.Duration
|
||||
}{
|
||||
{
|
||||
name: "return NoRequeueAfter if all the inner baseGenerators returns it",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: NoRequeueAfter,
|
||||
expected: NoRequeueAfter,
|
||||
},
|
||||
{
|
||||
name: "returns the minimal time",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
List: listGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: time.Duration(1),
|
||||
expected: time.Duration(1),
|
||||
},
|
||||
{
|
||||
name: "returns the minimal time for pull request",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: time.Duration(15 * time.Second),
|
||||
expected: time.Duration(15 * time.Second),
|
||||
},
|
||||
{
|
||||
name: "returns the default time if no requeueAfterSeconds is provided",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
expected: time.Duration(30 * time.Minute),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
mock := &generatorMock{}
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
PullRequest: g.PullRequest,
|
||||
}
|
||||
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"PullRequest": &PullRequestGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
got := matrixGenerator.GetRequeueAfter(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
})
|
||||
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
type generatorMock struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (g *generatorMock) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
args := g.Called(appSetGenerator)
|
||||
|
||||
return args.Get(0).(*argoprojiov1alpha1.ApplicationSetTemplate)
|
||||
}
|
||||
|
||||
func (g *generatorMock) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
args := g.Called(appSetGenerator, appSet)
|
||||
|
||||
return args.Get(0).([]map[string]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (g *generatorMock) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
args := g.Called(appSetGenerator)
|
||||
|
||||
return args.Get(0).(time.Duration)
|
||||
|
||||
}
|
||||
@@ -1,217 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*MergeGenerator)(nil)
|
||||
|
||||
var (
|
||||
ErrLessThanTwoGeneratorsInMerge = fmt.Errorf("found less than two generators, Merge requires two or more")
|
||||
ErrNoMergeKeys = fmt.Errorf("no merge keys were specified, Merge requires at least one")
|
||||
ErrNonUniqueParamSets = fmt.Errorf("the parameters from a generator were not unique by the given mergeKeys, Merge requires all param sets to be unique")
|
||||
)
|
||||
|
||||
type MergeGenerator struct {
|
||||
// The inner generators supported by the merge generator (cluster, git, list...)
|
||||
supportedGenerators map[string]Generator
|
||||
}
|
||||
|
||||
// NewMergeGenerator returns a MergeGenerator which allows the given supportedGenerators as child generators.
|
||||
func NewMergeGenerator(supportedGenerators map[string]Generator) Generator {
|
||||
m := &MergeGenerator{
|
||||
supportedGenerators: supportedGenerators,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// getParamSetsForAllGenerators generates params for each child generator in a MergeGenerator. Param sets are returned
|
||||
// in slices ordered according to the order of the given generators.
|
||||
func (m *MergeGenerator) getParamSetsForAllGenerators(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([][]map[string]string, error) {
|
||||
var paramSets [][]map[string]string
|
||||
for _, generator := range generators {
|
||||
generatorParamSets, err := m.getParams(generator, appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// concatenate param lists produced by each generator
|
||||
paramSets = append(paramSets, generatorParamSets)
|
||||
}
|
||||
return paramSets, nil
|
||||
}
|
||||
|
||||
// GenerateParams gets the params produced by the MergeGenerator.
|
||||
func (m *MergeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
if appSetGenerator.Merge == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Merge.Generators) < 2 {
|
||||
return nil, ErrLessThanTwoGeneratorsInMerge
|
||||
}
|
||||
|
||||
paramSetsFromGenerators, err := m.getParamSetsForAllGenerators(appSetGenerator.Merge.Generators, appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseParamSetsByMergeKey, err := getParamSetsByMergeKey(appSetGenerator.Merge.MergeKeys, paramSetsFromGenerators[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, paramSets := range paramSetsFromGenerators[1:] {
|
||||
paramSetsByMergeKey, err := getParamSetsByMergeKey(appSetGenerator.Merge.MergeKeys, paramSets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for mergeKeyValue, baseParamSet := range baseParamSetsByMergeKey {
|
||||
if overrideParamSet, exists := paramSetsByMergeKey[mergeKeyValue]; exists {
|
||||
overriddenParamSet, err := utils.CombineStringMapsAllowDuplicates(baseParamSet, overrideParamSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
baseParamSetsByMergeKey[mergeKeyValue] = overriddenParamSet
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mergedParamSets := make([]map[string]string, len(baseParamSetsByMergeKey))
|
||||
var i = 0
|
||||
for _, mergedParamSet := range baseParamSetsByMergeKey {
|
||||
mergedParamSets[i] = mergedParamSet
|
||||
i += 1
|
||||
}
|
||||
|
||||
return mergedParamSets, nil
|
||||
}
|
||||
|
||||
// getParamSetsByMergeKey converts the given list of parameter sets to a map of parameter sets where the key is the
|
||||
// unique key of the parameter set as determined by the given mergeKeys. If any two parameter sets share the same merge
|
||||
// key, getParamSetsByMergeKey will return ErrNonUniqueParamSets.
|
||||
func getParamSetsByMergeKey(mergeKeys []string, paramSets []map[string]string) (map[string]map[string]string, error) {
|
||||
if len(mergeKeys) < 1 {
|
||||
return nil, ErrNoMergeKeys
|
||||
}
|
||||
|
||||
deDuplicatedMergeKeys := make(map[string]bool, len(mergeKeys))
|
||||
for _, mergeKey := range mergeKeys {
|
||||
deDuplicatedMergeKeys[mergeKey] = false
|
||||
}
|
||||
|
||||
paramSetsByMergeKey := make(map[string]map[string]string, len(paramSets))
|
||||
for _, paramSet := range paramSets {
|
||||
paramSetKey := make(map[string]string)
|
||||
for mergeKey := range deDuplicatedMergeKeys {
|
||||
paramSetKey[mergeKey] = paramSet[mergeKey]
|
||||
}
|
||||
paramSetKeyJson, err := json.Marshal(paramSetKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
paramSetKeyString := string(paramSetKeyJson)
|
||||
if _, exists := paramSetsByMergeKey[paramSetKeyString]; exists {
|
||||
return nil, fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, paramSetKeyString)
|
||||
}
|
||||
paramSetsByMergeKey[paramSetKeyString] = paramSet
|
||||
}
|
||||
|
||||
return paramSetsByMergeKey, nil
|
||||
}
|
||||
|
||||
// getParams gets the parameters generated by the given child generator.
|
||||
func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: appSetBaseGenerator.List,
|
||||
Clusters: appSetBaseGenerator.Clusters,
|
||||
Git: appSetBaseGenerator.Git,
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
appSet)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("child generator returned an error on parameter generation: %v", err)
|
||||
}
|
||||
|
||||
if len(t) == 0 {
|
||||
return nil, fmt.Errorf("child generator generated no parameters")
|
||||
}
|
||||
|
||||
if len(t) > 1 {
|
||||
return nil, ErrMoreThenOneInnerGenerators
|
||||
}
|
||||
|
||||
return t[0].Params, nil
|
||||
}
|
||||
|
||||
func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
res := maxDuration
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Merge.Generators {
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
for _, g := range generators {
|
||||
temp := g.GetRequeueAfter(base)
|
||||
if temp < res && temp != NoRequeueAfter {
|
||||
found = true
|
||||
res = temp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
return res
|
||||
} else {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func getMergeGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MergeGenerator, error) {
|
||||
if r.Merge == nil {
|
||||
return nil, nil
|
||||
}
|
||||
merge, err := argoprojiov1alpha1.ToNestedMergeGenerator(r.Merge)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return merge.ToMergeGenerator(), nil
|
||||
}
|
||||
|
||||
// GetTemplate gets the Template field for the MergeGenerator.
|
||||
func (m *MergeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Merge.Template
|
||||
}
|
||||
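// Illustrative sketch (hypothetical helper): the merge-key indexing used by the
// merge generator above. Param sets are keyed by the JSON encoding of their
// merge-key values, so a later generator's set overrides a matching set from
// the first generator.
package main

import (
	"encoding/json"
	"fmt"
)

func keyFor(mergeKeys []string, paramSet map[string]string) string {
	key := map[string]string{}
	for _, k := range mergeKeys {
		key[k] = paramSet[k]
	}
	b, _ := json.Marshal(key) // map keys marshal in sorted order, so the key is stable
	return string(b)
}

func main() {
	mergeKeys := []string{"b"}
	base := map[string]string{"a": "1_1", "b": "same", "c": "1_3"}
	override := map[string]string{"a": "2_1", "b": "same"}
	// Equal keys mean the two param sets are merged; prints "true".
	fmt.Println(keyFor(mergeKeys, base) == keyFor(mergeKeys, override))
}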
@@ -1,328 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func getNestedListGenerator(json string) *argoprojiov1alpha1.ApplicationSetNestedGenerator {
|
||||
return &argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(json)}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getTerminalListGeneratorMultiple(jsons []string) argoprojiov1alpha1.ApplicationSetTerminalGenerator {
|
||||
elements := make([]apiextensionsv1.JSON, len(jsons))
|
||||
|
||||
for i, json := range jsons {
|
||||
elements[i] = apiextensionsv1.JSON{Raw: []byte(json)}
|
||||
}
|
||||
|
||||
generator := argoprojiov1alpha1.ApplicationSetTerminalGenerator{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: elements,
|
||||
},
|
||||
}
|
||||
|
||||
return generator
|
||||
}
|
||||
|
||||
func listOfMapsToSet(maps []map[string]string) (map[string]bool, error) {
|
||||
set := make(map[string]bool, len(maps))
|
||||
for _, paramMap := range maps {
|
||||
paramMapAsJson, err := json.Marshal(paramMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
set[string(paramMapAsJson)] = false
|
||||
}
|
||||
return set, nil
|
||||
}
|
||||
|
||||
func TestMergeGenerate(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
mergeKeys []string
|
||||
expectedErr error
|
||||
expected []map[string]string
|
||||
}{
|
||||
{
|
||||
name: "no generators",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{},
|
||||
mergeKeys: []string{"b"},
|
||||
expectedErr: ErrLessThanTwoGeneratorsInMerge,
|
||||
},
|
||||
{
|
||||
name: "one generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
*getNestedListGenerator(`{"a": "1_1","b": "same","c": "1_3"}`),
|
||||
},
|
||||
mergeKeys: []string{"b"},
|
||||
expectedErr: ErrLessThanTwoGeneratorsInMerge,
|
||||
},
|
||||
{
|
||||
name: "happy flow - generate paramSets",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
*getNestedListGenerator(`{"a": "1_1","b": "same","c": "1_3"}`),
|
||||
*getNestedListGenerator(`{"a": "2_1","b": "same"}`),
|
||||
*getNestedListGenerator(`{"a": "3_1","b": "different","c": "3_3"}`), // gets ignored because its merge key value isn't in the base params set
|
||||
},
|
||||
mergeKeys: []string{"b"},
|
||||
expected: []map[string]string{
|
||||
{"a": "2_1", "b": "same", "c": "1_3"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge keys absent - do not merge",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
*getNestedListGenerator(`{"a": "a"}`),
|
||||
*getNestedListGenerator(`{"a": "a"}`),
|
||||
},
|
||||
mergeKeys: []string{"b"},
|
||||
expected: []map[string]string{
|
||||
{"a": "a"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge key present in first set, absent in second - do not merge",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
*getNestedListGenerator(`{"a": "a"}`),
|
||||
*getNestedListGenerator(`{"b": "b"}`),
|
||||
},
|
||||
mergeKeys: []string{"b"},
|
||||
expected: []map[string]string{
|
||||
{"a": "a"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge nested matrix with some lists",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Matrix: toAPIExtensionsJSON(t, &argoprojiov1alpha1.NestedMatrixGenerator{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetTerminalGenerator{
|
||||
getTerminalListGeneratorMultiple([]string{`{"a": "1"}`, `{"a": "2"}`}),
|
||||
getTerminalListGeneratorMultiple([]string{`{"b": "1"}`, `{"b": "2"}`}),
|
||||
},
|
||||
}),
|
||||
},
|
||||
*getNestedListGenerator(`{"a": "1", "b": "1", "c": "added"}`),
|
||||
},
|
||||
mergeKeys: []string{"a", "b"},
|
||||
expected: []map[string]string{
|
||||
{"a": "1", "b": "1", "c": "added"},
|
||||
{"a": "1", "b": "2"},
|
||||
{"a": "2", "b": "1"},
|
||||
{"a": "2", "b": "2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "merge nested merge with some lists",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Merge: toAPIExtensionsJSON(t, &argoprojiov1alpha1.NestedMergeGenerator{
|
||||
MergeKeys: []string{"a"},
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetTerminalGenerator{
|
||||
getTerminalListGeneratorMultiple([]string{`{"a": "1", "b": "1"}`, `{"a": "2", "b": "2"}`}),
|
||||
getTerminalListGeneratorMultiple([]string{`{"a": "1", "b": "3", "c": "added"}`, `{"a": "3", "b": "2"}`}), // First gets merged, second gets ignored
|
||||
},
|
||||
}),
|
||||
},
|
||||
*getNestedListGenerator(`{"a": "1", "b": "3", "d": "added"}`),
|
||||
},
|
||||
mergeKeys: []string{"a", "b"},
|
||||
expected: []map[string]string{
|
||||
{"a": "1", "b": "3", "c": "added", "d": "added"},
|
||||
{"a": "2", "b": "2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
appSet := &argoprojiov1alpha1.ApplicationSet{}
|
||||
|
||||
var mergeGenerator = NewMergeGenerator(
|
||||
map[string]Generator{
|
||||
"List": &ListGenerator{},
|
||||
"Matrix": &MatrixGenerator{
|
||||
supportedGenerators: map[string]Generator{
|
||||
"List": &ListGenerator{},
|
||||
},
|
||||
},
|
||||
"Merge": &MergeGenerator{
|
||||
supportedGenerators: map[string]Generator{
|
||||
"List": &ListGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
got, err := mergeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Merge: &argoprojiov1alpha1.MergeGenerator{
|
||||
Generators: testCaseCopy.baseGenerators,
|
||||
MergeKeys: testCaseCopy.mergeKeys,
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}, appSet)
|
||||
|
||||
if testCaseCopy.expectedErr != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedErr.Error())
|
||||
} else {
|
||||
expectedSet, err := listOfMapsToSet(testCaseCopy.expected)
|
||||
assert.NoError(t, err)
|
||||
|
||||
actualSet, err := listOfMapsToSet(got)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedSet, actualSet)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func toAPIExtensionsJSON(t *testing.T, g interface{}) *apiextensionsv1.JSON {
|
||||
|
||||
resVal, err := json.Marshal(g)
|
||||
if err != nil {
|
||||
t.Error("unable to unmarshal json", g)
|
||||
return nil
|
||||
}
|
||||
|
||||
res := &apiextensionsv1.JSON{Raw: resVal}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func TestParamSetsAreUniqueByMergeKeys(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
mergeKeys []string
|
||||
paramSets []map[string]string
|
||||
expectedErr error
|
||||
expected map[string]map[string]string
|
||||
}{
|
||||
{
|
||||
name: "no merge keys",
|
||||
mergeKeys: []string{},
|
||||
expectedErr: ErrNoMergeKeys,
|
||||
},
|
||||
{
|
||||
name: "no paramSets",
|
||||
mergeKeys: []string{"key"},
|
||||
expected: make(map[string]map[string]string),
|
||||
},
|
||||
{
|
||||
name: "simple key, unique paramSets",
|
||||
mergeKeys: []string{"key"},
|
||||
paramSets: []map[string]string{{"key": "a"}, {"key": "b"}},
|
||||
expected: map[string]map[string]string{
|
||||
`{"key":"a"}`: {"key": "a"},
|
||||
`{"key":"b"}`: {"key": "b"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple key, non-unique paramSets",
|
||||
mergeKeys: []string{"key"},
|
||||
paramSets: []map[string]string{{"key": "a"}, {"key": "b"}, {"key": "b"}},
|
||||
expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key":"b"}`),
|
||||
},
|
||||
{
|
||||
name: "simple key, duplicated key name, unique paramSets",
|
||||
mergeKeys: []string{"key", "key"},
|
||||
paramSets: []map[string]string{{"key": "a"}, {"key": "b"}},
|
||||
expected: map[string]map[string]string{
|
||||
`{"key":"a"}`: {"key": "a"},
|
||||
`{"key":"b"}`: {"key": "b"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "simple key, duplicated key name, non-unique paramSets",
|
||||
mergeKeys: []string{"key", "key"},
|
||||
paramSets: []map[string]string{{"key": "a"}, {"key": "b"}, {"key": "b"}},
|
||||
expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key":"b"}`),
|
||||
},
|
||||
{
|
||||
name: "compound key, unique paramSets",
|
||||
mergeKeys: []string{"key1", "key2"},
|
||||
paramSets: []map[string]string{
|
||||
{"key1": "a", "key2": "a"},
|
||||
{"key1": "a", "key2": "b"},
|
||||
{"key1": "b", "key2": "a"},
|
||||
},
|
||||
expected: map[string]map[string]string{
|
||||
`{"key1":"a","key2":"a"}`: {"key1": "a", "key2": "a"},
|
||||
`{"key1":"a","key2":"b"}`: {"key1": "a", "key2": "b"},
|
||||
`{"key1":"b","key2":"a"}`: {"key1": "b", "key2": "a"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "compound key, duplicate key names, unique paramSets",
|
||||
mergeKeys: []string{"key1", "key1", "key2"},
|
||||
paramSets: []map[string]string{
|
||||
{"key1": "a", "key2": "a"},
|
||||
{"key1": "a", "key2": "b"},
|
||||
{"key1": "b", "key2": "a"},
|
||||
},
|
||||
expected: map[string]map[string]string{
|
||||
`{"key1":"a","key2":"a"}`: {"key1": "a", "key2": "a"},
|
||||
`{"key1":"a","key2":"b"}`: {"key1": "a", "key2": "b"},
|
||||
`{"key1":"b","key2":"a"}`: {"key1": "b", "key2": "a"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "compound key, non-unique paramSets",
|
||||
mergeKeys: []string{"key1", "key2"},
|
||||
paramSets: []map[string]string{
|
||||
{"key1": "a", "key2": "a"},
|
||||
{"key1": "a", "key2": "a"},
|
||||
{"key1": "b", "key2": "a"},
|
||||
},
|
||||
expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key1":"a","key2":"a"}`),
|
||||
},
|
||||
{
|
||||
name: "compound key, duplicate key names, non-unique paramSets",
|
||||
mergeKeys: []string{"key1", "key1", "key2"},
|
||||
paramSets: []map[string]string{
|
||||
{"key1": "a", "key2": "a"},
|
||||
{"key1": "a", "key2": "a"},
|
||||
{"key1": "b", "key2": "a"},
|
||||
},
|
||||
expectedErr: fmt.Errorf("%w. Duplicate key was %s", ErrNonUniqueParamSets, `{"key1":"a","key2":"a"}`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // since tests may run in parallel
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got, err := getParamSetsByMergeKey(testCaseCopy.mergeKeys, testCaseCopy.paramSets)
|
||||
|
||||
if testCaseCopy.expectedErr != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedErr.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/pull_request"
|
||||
pullrequest "github.com/argoproj/argo-cd/v2/applicationset/services/pull_request"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*PullRequestGenerator)(nil)
|
||||
|
||||
const (
|
||||
DefaultPullRequestRequeueAfterSeconds = 30 * time.Minute
|
||||
)
|
||||
|
||||
type PullRequestGenerator struct {
|
||||
client client.Client
|
||||
selectServiceProviderFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
}
|
||||
|
||||
func NewPullRequestGenerator(client client.Client) Generator {
|
||||
g := &PullRequestGenerator{
|
||||
client: client,
|
||||
}
|
||||
g.selectServiceProviderFunc = g.selectServiceProvider
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *PullRequestGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
// Return the default requeue interval of 30 minutes if no RequeueAfterSeconds is specified.
|
||||
|
||||
if appSetGenerator.PullRequest.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.PullRequest.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultPullRequestRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *PullRequestGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.PullRequest.Template
|
||||
}
|
||||
|
||||
func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.PullRequest == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
svc, err := g.selectServiceProviderFunc(ctx, appSetGenerator.PullRequest, applicationSetInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to select pull request service provider: %v", err)
|
||||
}
|
||||
|
||||
pulls, err := pull_request.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing repos: %v", err)
|
||||
}
|
||||
params := make([]map[string]string, 0, len(pulls))
|
||||
for _, pull := range pulls {
|
||||
params = append(params, map[string]string{
|
||||
"number": strconv.Itoa(pull.Number),
|
||||
"branch": pull.Branch,
|
||||
"head_sha": pull.HeadSHA,
|
||||
})
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// selectServiceProvider selects the provider to get pull requests from the configuration
|
||||
func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, generatorConfig *argoprojiov1alpha1.PullRequestGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
if generatorConfig.Github != nil {
|
||||
providerConfig := generatorConfig.Github
|
||||
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
}
|
||||
return pullrequest.NewGithubService(ctx, token, providerConfig.API, providerConfig.Owner, providerConfig.Repo, providerConfig.Labels)
|
||||
}
|
||||
if generatorConfig.Gitea != nil {
|
||||
providerConfig := generatorConfig.Gitea
|
||||
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
}
|
||||
return pullrequest.NewGiteaService(ctx, token, providerConfig.API, providerConfig.Owner, providerConfig.Repo, providerConfig.Insecure)
|
||||
}
|
||||
if generatorConfig.BitbucketServer != nil {
|
||||
providerConfig := generatorConfig.BitbucketServer
|
||||
if providerConfig.BasicAuth != nil {
|
||||
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret token: %v", err)
|
||||
}
|
||||
return pullrequest.NewBitbucketServiceBasicAuth(ctx, providerConfig.BasicAuth.Username, password, providerConfig.API, providerConfig.Project, providerConfig.Repo)
|
||||
} else {
|
||||
return pullrequest.NewBitbucketServiceNoAuth(ctx, providerConfig.API, providerConfig.Project, providerConfig.Repo)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("no Pull Request provider implementation configured")
|
||||
}
|
||||
|
||||
// getSecretRef gets the value of the key for the specified Secret resource.
|
||||
func (g *PullRequestGenerator) getSecretRef(ctx context.Context, ref *argoprojiov1alpha1.SecretRef, namespace string) (string, error) {
|
||||
if ref == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
secret := &corev1.Secret{}
|
||||
err := g.client.Get(
|
||||
ctx,
|
||||
client.ObjectKey{
|
||||
Name: ref.SecretName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
secret)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error fetching secret %s/%s: %v", namespace, ref.SecretName, err)
|
||||
}
|
||||
tokenBytes, ok := secret.Data[ref.Key]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("key %q in secret %s/%s not found", ref.Key, namespace, ref.SecretName)
|
||||
}
|
||||
return string(tokenBytes), nil
|
||||
}
|
||||
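A hedged usage sketch for the generator above follows; it is not part of this commit. It assumes the Generator interface exposes GenerateParams as defined here, and the PullRequestGeneratorGithub type name is an assumption based on the fields (API, Owner, Repo, TokenRef, Labels) referenced in selectServiceProvider; the secret name "github-token" is an example value only.

package generators

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

// examplePullRequestParams is a usage sketch: wire the generator with a
// controller-runtime client, point it at a GitHub repo, and print the params
// each open pull request contributes to the ApplicationSet template.
func examplePullRequestParams(k8sClient client.Client, appSet *argoprojiov1alpha1.ApplicationSet) error {
	gen := NewPullRequestGenerator(k8sClient)

	appSetGen := &argoprojiov1alpha1.ApplicationSetGenerator{
		PullRequest: &argoprojiov1alpha1.PullRequestGenerator{
			Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{ // type name assumed from the v1alpha1 API
				Owner: "argoproj",
				Repo: "argo-cd",
				TokenRef: &argoprojiov1alpha1.SecretRef{SecretName: "github-token", Key: "token"},
			},
		},
	}

	// Each param set exposes "number", "branch", and "head_sha"; errors bubble
	// up from the provider selection, the Secret lookup, or the PR listing.
	params, err := gen.GenerateParams(appSetGen, appSet)
	if err != nil {
		return fmt.Errorf("generating pull request params: %w", err)
	}
	for _, p := range params {
		fmt.Printf("PR #%s on branch %s at %s\n", p["number"], p["branch"], p["head_sha"])
	}
	return nil
}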
@@ -1,136 +0,0 @@
package generators

import (
	"context"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	pullrequest "github.com/argoproj/argo-cd/v2/applicationset/services/pull_request"
	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

func TestPullRequestGithubGenerateParams(t *testing.T) {
	ctx := context.Background()
	cases := []struct {
		selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
		expected []map[string]string
		expectedErr error
	}{
		{
			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
				return pullrequest.NewFakeService(
					ctx,
					[]*pullrequest.PullRequest{
						&pullrequest.PullRequest{
							Number: 1,
							Branch: "branch1",
							HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
						},
					},
					nil,
				)
			},
			expected: []map[string]string{
				{
					"number": "1",
					"branch": "branch1",
					"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
				},
			},
			expectedErr: nil,
		},
		{
			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
				return pullrequest.NewFakeService(
					ctx,
					nil,
					fmt.Errorf("fake error"),
				)
			},
			expected: nil,
			expectedErr: fmt.Errorf("error listing repos: fake error"),
		},
	}

	for _, c := range cases {
		gen := PullRequestGenerator{
			selectServiceProviderFunc: c.selectFunc,
		}
		generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{
			PullRequest: &argoprojiov1alpha1.PullRequestGenerator{},
		}
		got, gotErr := gen.GenerateParams(&generatorConfig, nil)
		assert.Equal(t, c.expectedErr, gotErr)
		assert.ElementsMatch(t, c.expected, got)
	}
}

func TestPullRequestGetSecretRef(t *testing.T) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: "test"},
		Data: map[string][]byte{
			"my-token": []byte("secret"),
		},
	}
	gen := &PullRequestGenerator{client: fake.NewClientBuilder().WithObjects(secret).Build()}
	ctx := context.Background()

	cases := []struct {
		name, namespace, token string
		ref *argoprojiov1alpha1.SecretRef
		hasError bool
	}{
		{
			name: "valid ref",
			ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "my-token"},
			namespace: "test",
			token: "secret",
			hasError: false,
		},
		{
			name: "nil ref",
			ref: nil,
			namespace: "test",
			token: "",
			hasError: false,
		},
		{
			name: "wrong name",
			ref: &argoprojiov1alpha1.SecretRef{SecretName: "other", Key: "my-token"},
			namespace: "test",
			token: "",
			hasError: true,
		},
		{
			name: "wrong key",
			ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "other-token"},
			namespace: "test",
			token: "",
			hasError: true,
		},
		{
			name: "wrong namespace",
			ref: &argoprojiov1alpha1.SecretRef{SecretName: "test-secret", Key: "my-token"},
			namespace: "other",
			token: "",
			hasError: true,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			token, err := gen.getSecretRef(ctx, c.ref, c.namespace)
			if c.hasError {
				assert.NotNil(t, err)
			} else {
				assert.Nil(t, err)
			}
			assert.Equal(t, c.token, token)
		})
	}
}
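The deleted tests above cover GenerateParams and getSecretRef but not the requeue fallback in GetRequeueAfter. The sketch below shows how that logic could be exercised; it is not part of the original file, and the *int64 element type of RequeueAfterSeconds is an assumption inferred from the pointer dereference in GetRequeueAfter.

package generators

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

// Sketch only; not part of this commit.
func TestPullRequestGetRequeueAfter(t *testing.T) {
	gen := &PullRequestGenerator{}

	// No override configured: the 30-minute default applies.
	g := &argoprojiov1alpha1.ApplicationSetGenerator{
		PullRequest: &argoprojiov1alpha1.PullRequestGenerator{},
	}
	assert.Equal(t, DefaultPullRequestRequeueAfterSeconds, gen.GetRequeueAfter(g))

	// Override configured: the configured number of seconds is used instead.
	sixty := int64(60) // field type assumed to be *int64
	g.PullRequest.RequeueAfterSeconds = &sixty
	assert.Equal(t, 60*time.Second, gen.GetRequeueAfter(g))
}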
Some files were not shown because too many files have changed in this diff.