Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 17:48:47 +01:00)

Compare commits: requeue-bu ... v2.2.4 (49 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 78d749ec88 | |
| | 8217d70085 | |
| | 02e61797b3 | |
| | 998f063a80 | |
| | 987f6659b8 | |
| | e099a6a851 | |
| | afbd59ba63 | |
| | b1e3a07d92 | |
| | c3144c0059 | |
| | 33547f149b | |
| | 06dc9aa836 | |
| | 03b17e0233 | |
| | d5909f7168 | |
| | 7d0d665747 | |
| | 834a102c09 | |
| | 4bcd8cf733 | |
| | a069c602dc | |
| | e309ceebac | |
| | 4a7f0bbfd8 | |
| | 28a54bf2a2 | |
| | e209426a7e | |
| | 06a95f86ce | |
| | 122ecefc3a | |
| | 004d73ce92 | |
| | 81e1a58328 | |
| | 84f949ff17 | |
| | a7e7f32a0f | |
| | 6da92a8e81 | |
| | d5368f5714 | |
| | 25cfb27d51 | |
| | 47d23e1f07 | |
| | 1dc14dc172 | |
| | 5c06333914 | |
| | 656bee1402 | |
| | 2a30c92a7e | |
| | 6a1fec9d33 | |
| | 0faeeb843d | |
| | 48bdabad1a | |
| | c3fd7f5f2d | |
| | 3f75a7faa3 | |
| | 0f14657301 | |
| | 02768367b5 | |
| | 3b628b3af8 | |
| | d8d2920eff | |
| | 1a72853ca3 | |
| | 5354e7d823 | |
| | 34d8f12c99 | |
| | 8840688e6e | |
| | c081ef0b00 | |
@@ -7,7 +7,6 @@ ignore:
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
- "test/.*"
coverage:
status:
# we've found this not to be useful
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 10 changed lines

@@ -6,7 +6,7 @@ labels: 'bug'
assignees: ''
---

<!-- If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a question in argocd slack [channel](https://argoproj.github.io/community/join-slack). -->
If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a question in argocd slack [channel](https://argoproj.github.io/community/join-slack).

Checklist:

@@ -16,19 +16,19 @@ Checklist:

**Describe the bug**

<!-- A clear and concise description of what the bug is. -->
A clear and concise description of what the bug is.

**To Reproduce**

<!-- A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue. -->
A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue.

**Expected behavior**

<!-- A clear and concise description of what you expected to happen. -->
A clear and concise description of what you expected to happen.

**Screenshots**

<!-- If applicable, add screenshots to help explain your problem. -->
If applicable, add screenshots to help explain your problem.

**Version**
.github/workflows/ci-build.yaml (vendored): 133 changed lines

@@ -12,24 +12,27 @@ on:
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
GOLANG_VERSION: '1.16.11'
jobs:
build-docker:
name: Build Docker image
runs-on: ubuntu-latest
if: github.head_ref != ''
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build Docker image
run: |
make image
check-go:
name: Ensure Go modules synchronicity
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules

@@ -45,13 +48,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -62,41 +65,31 @@ jobs:
run: make build-local
lint-go:
permissions:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
uses: actions/checkout@v2
- name: Run golangci-lint
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
uses: golangci/golangci-lint-action@v2
with:
version: v1.45.2
args: --timeout 10m --exclude SA5011 --verbose
version: v1.38.0
args: --timeout 10m --exclude SA5011
test-go:
name: Run unit tests for Go packages
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages

@@ -116,17 +109,13 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"

@@ -137,12 +126,12 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: test-results
path: test-results/

@@ -152,18 +141,15 @@ jobs:
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages

@@ -183,17 +169,13 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"

@@ -204,7 +186,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: race-results
path: test-results/

@@ -214,9 +196,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH

@@ -240,10 +222,9 @@ jobs:
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
- name: Initialize local Helm
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
helm2 init --client-only
- name: Run codegen
run: |
set -x

@@ -262,14 +243,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup NodeJS
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
uses: actions/setup-node@v1
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}

@@ -283,7 +264,6 @@ jobs:
env:
NODE_ENV: production
NODE_ONLINE_ENV: online
HOST_ARCH: amd64
working-directory: ui/
- name: Run ESLint
run: yarn lint

@@ -299,12 +279,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}

@@ -315,16 +295,16 @@ jobs:
run: |
mkdir -p test-results
- name: Get code coverage artifiact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@v2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@v2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
uses: codecov/codecov-action@v1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud

@@ -360,7 +340,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
k3s-version: [v1.23.3, v1.22.6, v1.21.2]
k3s-version: [v1.21.2, v1.20.2, v1.19.2, v1.18.9, v1.17.11]
needs:
- build-go
env:

@@ -373,26 +353,16 @@ jobs:
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
ARGOCD_SERVER: "127.0.0.1:8088"
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
# ubuntu-22.04 comes with kubectl, but the version is not pinned. The version as of 2022-12-05 is 1.26.0 which
# breaks the `TestNamespacedResourceDiffing` e2e test. So we'll pin to 1.25 and then fix the underlying issue.
- name: Install kubectl
run: |
rm /usr/local/bin/kubectl
curl -LO https://dl.k8s.io/release/v1.25.4/bin/linux/amd64/kubectl
mv kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
- name: Install K3S
env:
INSTALL_K3S_VERSION: ${{ matrix.k3s-version }}+k3s1

@@ -405,7 +375,7 @@ jobs:
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -415,13 +385,10 @@ jobs:
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Add ./dist to PATH
run: |
echo "$(pwd)/dist" >> $GITHUB_PATH
- name: Download Go dependencies
run: |
go mod download
go install github.com/mattn/goreman@latest
go get github.com/mattn/goreman
- name: Install all tools required for building & testing
run: |
make install-test-tools-local

@@ -431,9 +398,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull quay.io/dexidp/dex:v2.25.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.7-alpine
docker pull redis:6.2.6-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist

@@ -450,7 +417,7 @@ jobs:
count=1
until curl -f http://127.0.0.1:8088/healthz; do
sleep 10;
if test $count -ge 180; then
if test $count -ge 60; then
echo "Timeout"
exit 1
fi

@@ -461,7 +428,7 @@ jobs:
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
.github/workflows/codeql.yml (vendored): 32 changed lines

@@ -2,38 +2,32 @@ name: "Code scanning - action"
on:
push:
# Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot
branches-ignore:
- 'dependabot/**'
pull_request:
schedule:
- cron: '0 19 * * 0'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
CodeQL-Build:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd'
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java

@@ -41,7 +35,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/autobuild@v1
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl

@@ -55,4 +49,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/analyze@v1
.github/workflows/gh-pages.yaml (vendored, new file): 23 lines

@@ -0,0 +1,23 @@
name: Deploy
on:
push:
branches:
- master
pull_request:
branches:
- 'master'
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.9.8
- name: build
run: |
pip install -r docs/requirements.txt
mkdocs build
.github/workflows/image.yaml (vendored): 132 changed lines

@@ -4,143 +4,50 @@ on:
push:
branches:
- master
pull_request:
branches:
- master
types: [ labeled, unlabeled, opened, synchronize, reopened ]
env:
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
GOLANG_VERSION: '1.16.11'
jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
- uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- uses: actions/checkout@master
with:
path: src/github.com/argoproj/argo-cd
# get image tag
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
working-directory: ./src/github.com/argoproj/argo-cd
id: image
# login
# build
- run: |
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
docker images -a --format "{{.ID}}" | xargs -I {} docker rmi {}
make image DEV_IMAGE=true DOCKER_PUSH=false IMAGE_NAMESPACE=ghcr.io/argoproj IMAGE_TAG=${{ steps.image.outputs.tag }}
working-directory: ./src/github.com/argoproj/argo-cd
# publish
- run: |
docker login ghcr.io --username $USERNAME --password $PASSWORD
docker push ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker tag ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} argoproj/argocd:latest
docker push argoproj/argocd:latest
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
# build
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Setup cache for argocd-ui docker layer
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-single-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-single-buildx
- name: Build cache for argocd-ui stage
uses: docker/build-push-action@v2
with:
context: ./src/github.com/argoproj/argo-cd
target: argocd-ui
push: false
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
- name: Run non-container Snyk scans
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
npm install -g snyk
# Run with high threshold to fail build.
snyk test --org=argoproj --all-projects --exclude=docs,site --severity-threshold=high --policy-path=.snyk
snyk iac test manifests/install.yaml --org=argoproj --severity-threshold=high --policy-path=.snyk
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
then
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
--cache-from "type=local,src=/tmp/.buildx-cache" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd
- name: Run container Snyk scan
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
snyk container test quay.io/argoproj/argocd:latest --org=argoproj --file=Dockerfile --severity-threshold=high
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Clean up build cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ github.event_name == 'push' }}
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
# deploy
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
if: github.event_name == 'push'
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |

@@ -148,6 +55,5 @@ jobs:
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
if: github.event_name == 'push'
working-directory: argoproj-deployments/argocd
# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-GitHub-Package-Registry/m-p/41202/thread-id/9811
.github/workflows/release.yaml (vendored): 181 changed lines

@@ -2,27 +2,21 @@ name: Create ArgoCD release
on:
push:
tags:
- "release-v*"
- "!release-v1.5*"
- "!release-v1.4*"
- "!release-v1.3*"
- "!release-v1.2*"
- "!release-v1.1*"
- "!release-v1.0*"
- "!release-v0*"
- 'release-v*'
- '!release-v1.5*'
- '!release-v1.4*'
- '!release-v1.3*'
- '!release-v1.2*'
- '!release-v1.1*'
- '!release-v1.0*'
- '!release-v0*'
env:
GOLANG_VERSION: '1.18'
permissions:
contents: read
GOLANG_VERSION: '1.16.11'
jobs:
prepare-release:
permissions:
contents: write # To push changes to release branch
name: Perform automatic release on trigger ${{ github.ref }}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
# The name of the tag as supplied by the GitHub event

@@ -43,7 +37,7 @@ jobs:
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}

@@ -100,7 +94,7 @@ jobs:
echo "=========== BEGIN COMMIT MESSAGE ============="
git show ${SOURCE_TAG}
echo "============ END COMMIT MESSAGE =============="
# Quite dirty hack to get the release notes from the annotated tag
# into a temporary file.
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)

@@ -147,7 +141,7 @@ jobs:
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}

@@ -177,10 +171,7 @@ jobs:
run: |
set -ue
make install-codegen-tools-local
# We install kustomize in the dist directory
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
helm2 init --client-only
make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"

@@ -191,7 +182,18 @@ jobs:
echo "Creating release ${RELEASE_TAG}"
git tag ${RELEASE_TAG}
- name: Login to docker repositories
- name: Build Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
make image IMAGE_TAG="${TARGET_VERSION}" DOCKER_PUSH=false
make release-cli
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Push docker image to repository
env:
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}

@@ -199,52 +201,18 @@ jobs:
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}"
docker push ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Build and push Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker tag ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} argoproj/argocd:v${TARGET_VERSION}
docker push argoproj/argocd:v${TARGET_VERSION}
if: ${{ env.DRY_RUN != 'true' }}
- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
uses: juliangruber/read-file-action@v1
with:
path: ${{ env.RELEASE_NOTES }}
- name: Push changes to release branch

@@ -253,8 +221,8 @@ jobs:
git push origin ${TARGET_BRANCH}
git push origin ${RELEASE_TAG}
- name: Dry run GitHub release
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
- name: Create GitHub release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release

@@ -264,67 +232,44 @@ jobs:
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }}
if: ${{ env.DRY_RUN == 'true' }}
- name: Generate SBOM (spdx)
id: spdx-builder
env:
# defines the spdx/spdx-sbom-generator version to use.
SPDX_GEN_VERSION: v0.0.13
# defines the sigs.k8s.io/bom version to use.
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
yarn install --cwd ./ui
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION
# Generate SPDX for project dependencies analyzing package managers
for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g")
do
generator -p $folder -o /tmp
done
# Generate SPDX for binaries analyzing the docker image
if [[ ! -z $DOCKER_IMAGE ]]; then
bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE
fi
cd /tmp && tar -zcf sbom.tar.gz *.spdx
if: ${{ env.DRY_RUN != 'true' }}
- name: Sign sbom
run: |
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Create GitHub release
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
- name: Upload argocd-linux-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: ${{ env.RELEASE_TAG }}
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
files: |
dist/argocd-*
/tmp/sbom.tar.gz
/tmp/sbom.tar.gz.sig
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-linux-amd64
asset_name: argocd-linux-amd64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-darwin-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-darwin-amd64
asset_name: argocd-darwin-amd64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-windows-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-windows-amd64.exe
asset_name: argocd-windows-amd64.exe
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
uses: dawidd6/action-homebrew-bump-formula@v3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd
.gitignore (vendored): 3 changed lines

@@ -16,10 +16,9 @@ test-results
.scratch
node_modules/
.kube/
./test/cmp/*.sock
# ignore built binaries
cmd/argocd/argocd
cmd/argocd-application-controller/argocd-application-controller
cmd/argocd-repo-server/argocd-repo-server
cmd/argocd-server/argocd-server
cmd/argocd-server/argocd-server
.gitpod.Dockerfile (vendored): 2 changed lines

@@ -9,7 +9,7 @@ RUN curl -L https://go.kubebuilder.io/dl/2.3.1/$(go env GOOS)/$(go env GOARCH) |
tar -xz -C /tmp/ && mv /tmp/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH) /usr/local/kubebuilder
RUN apt-get install redis-server -y
RUN go install github.com/mattn/goreman@latest
RUN go get github.com/mattn/goreman
USER gitpod

@@ -2,5 +2,5 @@ image:
file: .gitpod.Dockerfile
tasks:
- init: make mod-download-local dep-ui-local && GO111MODULE=off go install github.com/mattn/goreman@latest
- init: make mod-download-local dep-ui-local && GO111MODULE=off go get github.com/mattn/goreman
command: make start-test-k8s
.golangci.yml (new file): 22 lines

@@ -0,0 +1,22 @@
run:
timeout: 2m
skip-files:
- ".*\\.pb\\.go"
skip-dirs:
- pkg/client/
- vendor/
linters:
enable:
- vet
- deadcode
- goimports
- varcheck
- structcheck
- ineffassign
- unconvert
- unparam
linters-settings:
goimports:
local-prefixes: github.com/argoproj/argo-cd
service:
golangci-lint-version: 1.21.0
.snyk: 22 changed lines

@@ -1,22 +0,0 @@
# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
version: v1.22.1
# ignores vulnerabilities until expiry date; change duration by modifying expiry date
ignore:
SNYK-JS-ANSIREGEX-1583908:
- '*':
reason: >-
Code is only run client-side in the swagger-ui endpoint. No risk of
server-side DoS.
SNYK-CC-K8S-44:
- 'manifests/core-install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
- 'manifests/install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
SNYK-JS-MOMENT-2440688:
- '*':
reason: >-
Code is only run client-side. No risk of directory traversal.
patch: {}
CHANGELOG.md: 278 changed lines

@@ -1,281 +1,5 @@
# Changelog

## v2.4.0 (Unreleased)

### Web Terminal In Argo CD UI

Feature enables engineers to start a shell in the running application container without leaving the web interface. Just find the required Kubernetes
Pod using the Application Details page, click on it and select the Terminal tab. The shell starts automatically and enables you to execute the required
commands, and helps to troubleshoot the application state.

### Access Control For Pod Logs & Web Terminal

Argo CD is used to manage the critical infrastructure of multiple organizations, which makes security the top priority of the project. We've listened to
your feedback and introduced additional access control settings that control access to Kubernetes Pod logs and the new Web Terminal feature.

#### Pod Logs UI

Since 2.4.9, the LOGS tab in pod view is visible in the UI only for users with explicit allow get logs policy.
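A minimal sketch of such a policy in the argocd-rbac-cm ConfigMap; the role, group, and project names below are placeholders, not taken from this changelog:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-rbac-cm
  namespace: argocd
data:
  policy.csv: |
    # allow members of a hypothetical team to read pod logs for apps in the "default" project
    p, role:log-viewer, logs, get, default/*, allow
    g, example-team, role:log-viewer
```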
#### Known pod logs UI issue prior to 2.4.9

Upon pressing the "LOGS" tab in pod view by users who don't have an explicit allow get logs policy, the red "unable to load data: Internal error" is received in the bottom of the screen, and "Failed to load data, please try again" is displayed.

### OpenTelemetry Tracing Integration

The new feature allows emitting richer telemetry data that might make identifying performance bottlenecks easier. The new feature is available for argocd-server
and argocd-repo-server components and can be enabled using the --otlp-address flag.
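As a sketch, the flag can be passed through the component's container command; the collector endpoint below is an assumption:

```yaml
# hypothetical excerpt from the argocd-server Deployment
containers:
  - name: argocd-server
    command:
      - argocd-server
      - --otlp-address
      - otel-collector:4317   # assumed OTLP gRPC endpoint
```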
### Power PC and IBM Z Support

The list of supported architectures has been expanded, and now includes IBM Z (s390x) and PowerPC (ppc64le). Starting with the v2.4 release the official quay.io
repository is going to have images for amd64, arm64, ppc64le, and s390x architectures.

### Other Notable Changes

Overall v2.4 release includes more than 300 hundred commits from nearly 90 contributors. Here is a short sample of the contributions:

* Enforce the deployment to remote clusters only
* Native support of GCP authentication for GKE
* Secured Redis connection
* ApplicationSet Gitea support

## v2.3.3 (2022-03-29)

- fix: prevent excessive repo-server disk usage for large repos (#8845) (#8897)
- fix: Set QPS and burst rate for resource ops client (#8915)

## v2.3.2 (2022-03-22)

- fix: application resource APIs must enforce project restrictions

## v2.3.1 (2022-03-10)

- fix: Retry checkbox unchecked unexpectedly; Sync up with YAML (#8682) (#8720)
- chore: Bump stable version of application set addon (#8744)
- fix: correct jsonnet paths resolution (#8721)
- fix(ui): Applications page incorrectly resets to tiles view. Fixes #8702 (#8718)

## v2.3.0 (2022-03-05)

### Argo CD ApplicationSet and Notifications are now part of Argo CD

Two popular [Argoproj Labs](https://github.com/argoproj-labs) projects [Argo CD ApplicationSet](https://github.com/argoproj/applicationset) and
[Argo CD Notifications](https://github.com/argoproj-labs/argocd-notifications) are now part of Argo CD! The default Argo CD installation manifests now
bundle both projects out of the box. Going forward you can expect more tightened integration of these projects into Argo CD.

### New sync and diff strategies

Users can now configure the Application resource to instruct Argo CD to consider the ignore difference setup during the sync process.
In order to do so, add the new sync option RespectIgnoreDifferences=true in the Application resource. Once the sync option is added,
Argo CD won't change ignored fields during the syncing process.
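A minimal sketch of the sync option on an Application resource (names are placeholders):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app   # placeholder
  namespace: argocd
spec:
  syncPolicy:
    syncOptions:
      - RespectIgnoreDifferences=true
```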
Configuring ignored fields is also easier now. Instead of listing fields one by one users can now leverage the
managedFields metadata to instruct Argo CD about trusted managers and automatically ignore any fields owned by them. A new diff customization
(managedFieldsManagers) is now available allowing users to specify managers the application should trust and to ignore all fields owned by those managers.
Read more about these changes at [New sync and diff strategies in ArgoCD](https://blog.argoproj.io/new-sync-and-diff-strategies-in-argocd-44195d3f8b8c) blog post.
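A sketch of the new diff customization, trusting fields owned by an example manager:

```yaml
spec:
  ignoreDifferences:
    - group: apps
      kind: Deployment
      managedFieldsManagers:
        - kube-controller-manager   # example of a trusted field manager
```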
### ARM Images

An officially supported ARM 64 image is now available. Enjoy running Argo CD on your Raspberry Pi! Additionally, the image size was reduced by nearly ~50%
and is only 200MB now. The ARM version of `argocd` CLI is also available and published as a Github release artifact.

### Compact Tree View And Click Application Navigation

The application details page now supports compact application resources tree visualization. Using the "Group Nodes" button, you can collapse the similar resources
into a single group node to remove the clutter and make it easier to understand the state of application resources. You still can get detailed information about the collapsed resources by clicking on the group node. The list of collapsed resources will be available in a sliding panel. Compact resource tree is still too big?
You can use the zoom in and zoom out feature to make it smaller - or even larger!

You no longer need to move back and forth between the application details page and the application list page. Instead you can navigate directly to the required application by clicking the search icon in the application details page title.

### Upgraded Config Management Tools

Both bundled Helm and Kustomize binaries have been upgraded to the latest versions. Kustomize has been upgraded from 4.2.0 to 4.4.1 and Helm has been upgraded from 3.7.1 to 3.8.0.

### Bug Fixes and Performance Enhancements

* Config management tools enhancements:
    * The skipCrds flag and ability to ignore missing values files for Helm (#8012, #8003)
    * Additional environment variables for Kustomize (#8096)
* Argo CD CLI follows the XDG Base directory standard (#7638)
* Redis is no longer used during SSO login (#8241)

### Features

- feat: Add app list and details page views to navigation history (#7776) (#7937)
- feat: Add skipCrds flag for helm charts (#8012)
- feat: Add visual indicator for newly created pods (#8006)
- feat: Added a new Helm option ignoreMissingValueFiles (#7767) (#8003)
- feat: Allow configuring system wide ignore differences for all resources (#8224)
- feat: Allow escaping dollar in Envsubst (#7961)
- feat: Allow external links on Application (#3487) (#8231)
- feat: Allow selecting application on detail page (#8176)
- feat: Bundle applicationset-controller with argocd (#8148)
- feat: Enable specifying root ca for oidc (#6712)
- feat: Expose ARGOCD_APP_NAME to the `kustomize build` command (#8096)
- feat: Ignore differences owned by trusted managers from managedFields (#7869)
- feat: New sync option to use ignore diff configs during sync (#8078)
- feat: Provide address flag for admin dashboard command (#8095)
- feat: Store "Group Nodes" button state in application details preferences (#8036)
- feat: Support specifying cluster by name in addition to API server URL in Cluster API (#8077)
- feat: Support XDG Base directory standard (#7638) (#7791)
- feat: Use encrypted cookie to store OAuth2 state nonce (instead of redis) (#8241)
- feat: Build images on PR and conditionally build arm64 image on push (#8108)

### Bug Fixes

- fix: Add "Restarting MinIO" status to MiniO Tenant health check (#8191)
- fix: Add all resources in list view (#7295)
- fix: Adding pagination to grouped nodes sliding panel#7837 (#7915)
- fix: Allow all resources to add external links (#7923)
- fix: Always call ValidateDestination (#7976)
- fix: Application exist panic when execute api call (#8188)
- fix: Application-icons-alignment (#8054)
- fix: Controller panics if resource manifest has incorrect annotation (#8022)
- fix: Correctly handle project field during partial cluster update (#7994)
- fix: Default value for retry validation #8055 (#8064)
- fix: Fix a possible crash when parsing RBAC (#8165)
- fix: Grouped node list missing resources on Compact resources view #8014 (#8018)
- fix: Issue with headless installation (#7958)
- fix: Issue with project scoped resources (#8048)
- fix: Kubernetes labels normalization for Prometheus (#7925)
- fix: Nested Refresh dropdown does not work on Application Details page #1524 (#7950)
- fix: Network line colors and menu icon alignment (#8059)
- fix: Opening app details shows UI error on some apps (#8016) (#8019)
- fix: Parse to correct uint32 type (#8177)
- fix: Prevent possible nil-pointer deref in normalizer (#8185)
- fix: Prevent possible out-of-bounds access when loading policies (#8186)
- fix: Provide a semantic version parsed version for KUBE_VERSION (#8250)
- fix: Refreshing label toast (#7979)
- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971)
- fix: Retry disabled text (#8004)
- fix: Route health check stuck in 'Progressing' (#8170)
- fix: Sync window panel is crashed if resource name not contain letters (#8053)
- fix: Targetervision compatible without prefix refs/heads or refs/tags (#7939)
- fix: Trailing line in Filter Dropdown Menus #7821 (#8001)
- fix: Webhook URL matching edge cases (#7981)
- fix(ui): Use consistent case for diff modes (#7945)
- fix: Use gRPC timeout for sidecar CMPs (#8131) (#8236)

### Other

- chore: Bump go-jsonnet to v0.18.0 (#8011)
- chore: Escape proj in regex (#7985)
- chore: Exclude argocd-server rbac for core-install (#8234)
- chore: Log out the resource triggering reconciliation (#8192)
- chore: Migrate to use golang-jwt/jwt v4.2.0 (#8136)
- chore: Move resolveRevision from api-server to repo-server (#7966)
- chore: Update notifications version (#8267)
- chore: Update slack version (#8299)
- chore: Update to Redis 6.2.4 (#8157)
- chore: Upgrade awscli to 2.4.6 and remove python deps (#7947)
- chore: Upgrade base image to ubuntu:21.10 (#8230)
- chore: Upgrade dex to v2.30.2 (https://github.com/dexidp/dex/issues/2326) (#8237)
- chore: Upgrade gitops engine (#8288)
- chore: Upgrade golang to 1.17.6 (#8229)
- chore: Upgrade helm to most recent version (v3.7.2) (#8226)
- chore: Upgrade k8s client to v1.23 (#8213)
- chore: Upgrade kustomize to most recent version (v4.4.1) (#8227)
- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133)
- refactor: Move project filtering to server side (#8102)

## v2.2.3 (2022-01-18)

- fix: Application exist panic when execute api call (#8188)
- fix: Route health check stuck in 'Progressing' (#8170)
- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133)
- chore: Update to Redis 6.2.4 (#8157) (#8158)

## v2.2.2 (2021-12-31)

- fix: Issue with project scoped resources (#8048)
- fix: Escape proj in regex (#7985)
- fix: Default value for retry validation #8055 (#8064)
- fix: Sync window panel is crashed if resource name not contain letters (#8053)
- fix: Upgrade github.com/argoproj/gitops-engine to v0.5.2
- fix: Retry disabled text (#8004)
- fix: Opening app details shows UI error on some apps (#8016) (#8019)
- fix: Correctly handle project field during partial cluster update (#7994)
- fix: Cluster API does not support updating labels and annotations (#7901)

## v2.2.1 (2021-12-16)

- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971)
- fix: Issue with headless installation (#7958)
- fix: Nil pointer (#7905)

## v2.2.0 (2021-12-14)

> [Upgrade instructions](./docs/operator-manual/upgrading/2.1-2.2.md)

### Project Scoped repositories and clusters

The project scoped repositories and clusters is a feature that simplifies registering the repositories and cluster credentials.
Instead of requiring operators to set up in advance all clusters and git repositories that can be used, developers can now do
this on their own in a self-service manner.
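Declaratively, a project-scoped repository is a regular repository credential with a project field; a sketch with placeholder values:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example-repo   # placeholder
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  url: https://github.com/example-org/example-repo.git   # placeholder
  project: example-project   # scopes the repository to this project
```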
### Config Management Plugins V2

The Config Management Plugins V2 is set of enhancement of the existing config management plugins feature.
The list includes improved installation experience, ability to package plugin into a separate image and
improved plugin manifests discovery.
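A sketch of what a v2 plugin manifest (plugin.yaml) mounted into a sidecar container may look like; the command and discovery rule are illustrative only:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: ConfigManagementPlugin
metadata:
  name: example-plugin   # placeholder
spec:
  version: v1.0
  generate:
    command: [sh, -c, "example-generate ."]    # assumed manifest-generation command
  discover:
    fileName: "./example-plugin-marker.yaml"   # assumed discovery rule
```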
### Resource tracking

Argo CD has traditionally tracked the resources it manages by the well-known "app.kubernetes.io/instance" property.
While using this property works ok in simple scenarios, it also has several limitations. ArgoCD now allows you to use
a new annotation (argocd.argoproj.io/tracking-id) for tracking your resources. Using this annotation is a much more flexible approach
as there are no conflicts with other Kubernetes tools, and you can easily install multiple Argo CD instances on the same clusters.
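The tracking method is switched in the argocd-cm ConfigMap; a minimal sketch:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  application.resourceTrackingMethod: annotation   # alternatives: label (default), annotation+label
```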
### Bug Fixes and Performance Enhancements
|
||||
|
||||
* Argo CD API server caches RBAC checks that significantly improves the GET /api/v1/applications API performance (#7587)
|
||||
* Argo CD RBAC supports regex matches (#7165)
|
||||
* Health check support for KubeVirt (#7176), Cassandra (#7017), Openshift Route (#7112), DeploymentConfig (#7114), Confluent (#6957) and SparkApplication (#7434) CRDs.
|
||||
* Persistent banner (#7312) with custom positioning (#7462)
|
||||
* Cluster name support in project destinations (#7198)
|
||||
* around 30 more features and a total of 84 bug fixes
|
||||
|
||||
## v2.1.7 (2021-12-14)
|
||||
|
||||
- fix: issue with keepalive (#7861)
|
||||
- fix nil pointer dereference error (#7905)
|
||||
- fix: env vars to tune cluster cache were broken (#7779)
|
||||
- fix: upgraded gitops engine to v0.4.2 (fixes #7561)
|
||||
|
||||
|
||||
## v2.1.6 (2021-11-16)
|
||||
|
||||
- fix: don't use revision caching during app creation (#7508)
|
||||
- fix: supporting OCI dependencies. Fixes #6062 (#6994)
|
||||
|
||||
## v2.1.5 (2021-11-16)
|
||||
|
||||
- fix: Invalid memory address or nil pointer dereference in processRequestedAppOperation (#7501)
|
||||
|
||||
## v2.1.4 (2021-11-15)
|
||||
|
||||
- fix: Operation has completed with phase: Running (#7482)
|
||||
- fix: Application status panel shows Syncing instead of Deleting (#7486)
|
||||
- fix(ui): Add Error Boundary around Extensions and comply with new Extensions API (#7215)
|
||||
|
||||
|
||||
## v2.1.3 (2021-10-29)
|
||||
|
||||
- fix: core-install.yaml always refers to latest argocd image (#7321)
|
||||
- fix: handle applicationset backup forbidden error (#7306)
|
||||
- fix: Argo CD should not use cached git/helm revision during app creation/update validation (#7244)
|
||||
|
||||
## v2.1.2 (2021-10-02)
|
||||
|
||||
- fix: cluster filter popping out of box (#7135)
|
||||
- fix: gracefully shutdown metrics server when dex config changes (#7138)
|
||||
- fix: upgrade gitops engine version to v0.4.1 (#7088)
|
||||
- fix: repository name already exists when multiple helm dependencies (#7096)
|
||||
|
||||
|
||||
## v2.1.1 (2021-08-25)
|
||||
|
||||
### Bug Fixes
|
||||
@@ -1029,7 +753,7 @@ More documentation and tools are coming in patch releases.

Argo CD deletes all **in-flight** hooks if you terminate a running sync operation. The hook state assessment change implemented in this release enables Argo CD to detect
an in-flight state for all Kubernetes resources including `Deployment`, `PVC`, `StatefulSet`, `ReplicaSet`, etc. So if you terminate a sync operation that has, for example, a
`StatefulSet` hook that is `Progressing`, it will be deleted. Long-running jobs are not supposed to be used as sync hooks, and you should consider using
[Sync Waves](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-waves/) instead.
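
To make that suggestion concrete, the sketch below shows sync-wave ordering in principle: resources carry an `argocd.argoproj.io/sync-wave` annotation and are applied in ascending wave order, with unannotated resources defaulting to wave 0. This is an illustrative sketch only, not the gitops engine's actual implementation.

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
)

const syncWaveAnnotation = "argocd.argoproj.io/sync-wave"

type manifest struct {
	Name        string
	Annotations map[string]string
}

// wave returns the manifest's sync wave; resources without the annotation
// default to wave 0, matching the documented Argo CD behavior.
func wave(m manifest) int {
	if v, ok := m.Annotations[syncWaveAnnotation]; ok {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return 0
}

func main() {
	manifests := []manifest{
		{Name: "db-migration-job", Annotations: map[string]string{syncWaveAnnotation: "1"}},
		{Name: "deployment", Annotations: map[string]string{syncWaveAnnotation: "2"}},
		{Name: "configmap"}, // no annotation: wave 0, applied first
	}
	// Apply manifests in ascending wave order.
	sort.SliceStable(manifests, func(i, j int) bool { return wave(manifests[i]) < wave(manifests[j]) })
	for _, m := range manifests {
		fmt.Println(m.Name)
	}
}
```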

#### Enhancements

* feat: Add custom health checks for cert-manager v0.11.0 (#2689)

86 Dockerfile
@@ -1,17 +1,16 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:21.04
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.18 AS builder
|
||||
FROM docker.io/library/golang:1.16.11 as builder
|
||||
|
||||
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
RUN apt-get update && apt-get install -y \
|
||||
openssh-server \
|
||||
nginx \
|
||||
unzip \
|
||||
fcgiwrap \
|
||||
git \
|
||||
git-lfs \
|
||||
@@ -25,16 +24,19 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
COPY hack/install.sh hack/tool-versions.sh ./
|
||||
COPY hack/installers installers
|
||||
ADD hack/install.sh .
|
||||
ADD hack/installers installers
|
||||
ADD hack/tool-versions.sh .
|
||||
|
||||
RUN ./install.sh helm-linux && \
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
|
||||
RUN ./install.sh ksonnet-linux
|
||||
RUN ./install.sh helm2-linux
|
||||
RUN ./install.sh helm-linux
|
||||
RUN ./install.sh kustomize-linux
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD Base - used as the base for both the release and dev argocd images
|
||||
####################################################################################################
|
||||
FROM $BASE_IMAGE AS argocd-base
|
||||
FROM $BASE_IMAGE as argocd-base
|
||||
|
||||
USER root
|
||||
|
||||
@@ -47,13 +49,16 @@ RUN groupadd -g 999 argocd && \
|
||||
chmod g=u /home/argocd && \
|
||||
apt-get update && \
|
||||
apt-get dist-upgrade -y && \
|
||||
apt-get install -y \
|
||||
git git-lfs tini gpg tzdata && \
|
||||
apt-get install -y git git-lfs python3-pip tini gpg tzdata && \
|
||||
apt-get clean && \
|
||||
pip3 install awscli==1.18.80 && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
|
||||
COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh
|
||||
COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh
|
||||
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
|
||||
COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2
|
||||
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
|
||||
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
|
||||
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
@@ -61,17 +66,17 @@ COPY entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
|
||||
|
||||
# support for mounting configuration from a configmap
|
||||
WORKDIR /app/config/ssh
|
||||
RUN touch ssh_known_hosts && \
|
||||
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
|
||||
RUN mkdir -p /app/config/ssh && \
|
||||
touch /app/config/ssh/ssh_known_hosts && \
|
||||
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
|
||||
|
||||
WORKDIR /app/config
|
||||
RUN mkdir -p tls && \
|
||||
mkdir -p gpg/source && \
|
||||
mkdir -p gpg/keys && \
|
||||
chown argocd gpg/keys && \
|
||||
chmod 0700 gpg/keys
|
||||
RUN mkdir -p /app/config/tls
|
||||
RUN mkdir -p /app/config/gpg/source && \
|
||||
mkdir -p /app/config/gpg/keys && \
|
||||
chown argocd /app/config/gpg/keys && \
|
||||
chmod 0700 /app/config/gpg/keys
|
||||
|
||||
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
|
||||
ENV USER=argocd
|
||||
|
||||
USER 999
|
||||
@@ -80,37 +85,41 @@ WORKDIR /home/argocd
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
|
||||
FROM docker.io/library/node:12.18.4 as argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
ADD ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
|
||||
RUN yarn install --network-timeout 200000 && \
|
||||
yarn cache clean
|
||||
RUN yarn install
|
||||
|
||||
COPY ["ui/", "."]
|
||||
ADD ["ui/", "."]
|
||||
|
||||
ARG ARGO_VERSION=latest
|
||||
ENV ARGO_VERSION=$ARGO_VERSION
|
||||
ARG TARGETARCH
|
||||
RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTIONS=--max_old_space_size=8192 yarn build
|
||||
RUN NODE_ENV='production' NODE_ONLINE_ENV='online' yarn build
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.18 AS argocd-build
|
||||
FROM docker.io/library/golang:1.16.11 as argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
COPY go.* ./
|
||||
COPY go.mod go.mod
|
||||
COPY go.sum go.sum
|
||||
|
||||
RUN go mod download
|
||||
|
||||
# Perform the build
|
||||
COPY . .
|
||||
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make argocd-all
|
||||
RUN make argocd-all
|
||||
|
||||
ARG BUILD_ALL_CLIS=true
|
||||
RUN if [ "$BUILD_ALL_CLIS" = "true" ] ; then \
|
||||
make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all && \
|
||||
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all \
|
||||
; fi
|
||||
|
||||
####################################################################################################
|
||||
# Final image
|
||||
@@ -119,13 +128,10 @@ FROM argocd-base
|
||||
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
|
||||
|
||||
USER root
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
|
||||
|
||||
USER 999
|
||||
|
||||
@@ -3,13 +3,12 @@
|
||||
####################################################################################################
|
||||
FROM argocd-base
|
||||
COPY argocd /usr/local/bin/
|
||||
COPY argocd-darwin-amd64 /usr/local/bin/
|
||||
COPY argocd-windows-amd64.exe /usr/local/bin/
|
||||
|
||||
USER root
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
|
||||
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller
|
||||
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller
|
||||
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex
|
||||
USER 999
|
||||
|
||||
95 Makefile
@@ -4,8 +4,6 @@ DIST_DIR=${CURRENT_DIR}/dist
|
||||
CLI_NAME=argocd
|
||||
BIN_NAME=argocd
|
||||
|
||||
GEN_RESOURCES_CLI_NAME=argocd-resources-gen
|
||||
|
||||
HOST_OS:=$(shell go env GOOS)
|
||||
HOST_ARCH:=$(shell go env GOARCH)
|
||||
|
||||
@@ -15,7 +13,7 @@ GIT_COMMIT=$(shell git rev-parse HEAD)
|
||||
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
|
||||
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
|
||||
VOLUME_MOUNT=$(shell if test "$(go env GOOS)" = "darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":delegated"; else echo ""; fi)
|
||||
KUBECTL_VERSION=$(shell go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)
|
||||
KUBECTL_VERSION=$(shell go list -m all | grep k8s.io/client-go | cut -d ' ' -f5)
|
||||
|
||||
GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi)
|
||||
GOCACHE?=$(HOME)/.cache/go-build
|
||||
@@ -25,7 +23,7 @@ DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
|
||||
|
||||
ARGOCD_PROCFILE?=Procfile
|
||||
|
||||
# Strict mode has been disabled in latest versions of mkdocs-material.
|
||||
# Strict mode has been disabled in latest versions of mkdocs-material.
|
||||
# Thus pointing to the older image of mkdocs-material matching the version used by argo-cd.
|
||||
MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1
|
||||
MKDOCS_RUN_ARGS?=
|
||||
@@ -47,11 +45,10 @@ ARGOCD_E2E_DEX_PORT?=5556
|
||||
ARGOCD_E2E_YARN_HOST?=localhost
|
||||
ARGOCD_E2E_DISABLE_AUTH?=
|
||||
|
||||
ARGOCD_E2E_TEST_TIMEOUT?=30m
|
||||
ARGOCD_E2E_TEST_TIMEOUT?=20m
|
||||
|
||||
ARGOCD_IN_CI?=false
|
||||
ARGOCD_TEST_E2E?=true
|
||||
ARGOCD_BIN_MODE?=true
|
||||
|
||||
ARGOCD_LINT_GOGC?=20
|
||||
|
||||
@@ -114,7 +111,7 @@ define run-in-test-client
|
||||
bash -c "$(1)"
|
||||
endef
|
||||
|
||||
#
|
||||
#
|
||||
define exec-in-test-server
|
||||
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
endef
|
||||
@@ -136,6 +133,7 @@ override LDFLAGS += \
|
||||
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
|
||||
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
|
||||
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
|
||||
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
|
||||
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
|
||||
|
||||
ifeq (${STATIC_BUILD}, true)
|
||||
@@ -179,7 +177,7 @@ gogen: ensure-gopath
|
||||
go generate ./util/argo/...
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: ensure-gopath mod-vendor-local
|
||||
protogen: ensure-gopath
|
||||
export GO111MODULE=off
|
||||
./hack/generate-proto.sh
|
||||
|
||||
@@ -188,16 +186,6 @@ openapigen: ensure-gopath
|
||||
export GO111MODULE=off
|
||||
./hack/update-openapi.sh
|
||||
|
||||
.PHONY: notification-catalog
|
||||
notification-catalog:
|
||||
go run ./hack/gen-catalog catalog
|
||||
|
||||
.PHONY: notification-docs
|
||||
notification-docs:
|
||||
go run ./hack/gen-docs
|
||||
go run ./hack/gen-catalog docs
|
||||
|
||||
|
||||
.PHONY: clientgen
|
||||
clientgen: ensure-gopath
|
||||
export GO111MODULE=off
|
||||
@@ -205,11 +193,10 @@ clientgen: ensure-gopath
|
||||
|
||||
.PHONY: clidocsgen
|
||||
clidocsgen: ensure-gopath
|
||||
go run tools/cmd-docs/main.go
|
||||
|
||||
go run tools/cmd-docs/main.go
|
||||
|
||||
.PHONY: codegen-local
|
||||
codegen-local: ensure-gopath mod-vendor-local notification-docs notification-catalog gogen protogen clientgen openapigen clidocsgen manifests-local
|
||||
codegen-local: ensure-gopath mod-vendor-local gogen protogen clientgen openapigen clidocsgen manifests-local
|
||||
rm -rf vendor/
|
||||
|
||||
.PHONY: codegen
|
||||
@@ -224,19 +211,13 @@ cli: test-tools-image
|
||||
cli-local: clean-debug
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
|
||||
.PHONY: gen-resources-cli-local
|
||||
gen-resources-cli-local: clean-debug
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
|
||||
|
||||
.PHONY: release-cli
|
||||
release-cli: clean-debug build-ui
|
||||
make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all
|
||||
make BIN_NAME=argocd-darwin-arm64 GOOS=darwin GOARCH=arm64 argocd-all
|
||||
make BIN_NAME=argocd-linux-amd64 GOOS=linux argocd-all
|
||||
make BIN_NAME=argocd-linux-arm64 GOOS=linux GOARCH=arm64 argocd-all
|
||||
make BIN_NAME=argocd-linux-ppc64le GOOS=linux GOARCH=ppc64le argocd-all
|
||||
make BIN_NAME=argocd-linux-s390x GOOS=linux GOARCH=s390x argocd-all
|
||||
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all
|
||||
release-cli: clean-debug image
|
||||
docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)
|
||||
docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64
|
||||
docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64
|
||||
docker cp tmp-argocd-linux:/usr/local/bin/argocd-windows-amd64.exe ${DIST_DIR}/argocd-windows-amd64.exe
|
||||
docker rm tmp-argocd-linux
|
||||
|
||||
.PHONY: test-tools-image
|
||||
test-tools-image:
|
||||
@@ -254,7 +235,7 @@ manifests: test-tools-image
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
.PHONY: argocd-all
|
||||
argocd-all: clean-debug
|
||||
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
|
||||
.PHONY: server
|
||||
server: clean-debug
|
||||
@@ -268,21 +249,20 @@ repo-server:
|
||||
controller:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
|
||||
|
||||
.PHONY: build-ui
|
||||
build-ui:
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-ui --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
|
||||
.PHONY: image
|
||||
ifeq ($(DEV_IMAGE), true)
|
||||
# The "dev" image builds the binaries from the users desktop environment (instead of in Docker)
|
||||
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
|
||||
# the dist directory is under .dockerignore.
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 docker build -t argocd-base --target argocd-base .
|
||||
image:
|
||||
docker build -t argocd-base --target argocd-base .
|
||||
docker build -t argocd-ui --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-windows-amd64.exe ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
|
||||
@@ -292,13 +272,15 @@ image: build-ui
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
image:
|
||||
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
|
||||
endif
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
|
||||
.PHONY: armimage
|
||||
# The "BUILD_ALL_CLIS" argument is to skip building the CLIs for darwin and windows
|
||||
# which would take a really long time.
|
||||
armimage:
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm . --build-arg BUILD_ALL_CLIS="false"
|
||||
|
||||
.PHONY: builder-image
|
||||
builder-image:
|
||||
@@ -420,11 +402,10 @@ start-e2e: test-tools-image
|
||||
|
||||
# Starts e2e server locally (or within a container)
|
||||
.PHONY: start-e2e-local
|
||||
start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
start-e2e-local:
|
||||
kubectl create ns argocd-e2e || true
|
||||
kubectl config set-context --current --namespace=argocd-e2e
|
||||
kustomize build test/manifests/base | kubectl apply -f -
|
||||
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
|
||||
# Create GPG keys and source directories
|
||||
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
|
||||
@@ -437,11 +418,9 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
|
||||
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_E2E_DISABLE_AUTH=false \
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
BIN_MODE=$(ARGOCD_BIN_MODE) \
|
||||
ARGOCD_E2E_TEST=true \
|
||||
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
|
||||
|
||||
@@ -514,6 +493,10 @@ serve-docs-local:
|
||||
serve-docs:
|
||||
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
|
||||
|
||||
.PHONY: lint-docs
|
||||
lint-docs:
|
||||
# https://github.com/dkhamsing/awesome_bot
|
||||
find docs -name '*.md' -exec grep -l http {} + | xargs docker run --rm -v $(PWD):/mnt:ro dkhamsing/awesome_bot -t 3 --allow-dupe --allow-redirect --white-list `cat white-list | grep -v "#" | tr "\n" ','` --skip-save-results --
|
||||
|
||||
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
@@ -535,7 +518,9 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
|
||||
# Installs all tools required for running unit & end-to-end tests (Linux packages)
|
||||
.PHONY: install-test-tools-local
|
||||
install-test-tools-local:
|
||||
./hack/install.sh kustomize
|
||||
./hack/install.sh kustomize-linux
|
||||
./hack/install.sh ksonnet-linux
|
||||
./hack/install.sh helm2-linux
|
||||
./hack/install.sh helm-linux
|
||||
|
||||
# Installs all tools required for running codegen (Linux packages)
|
||||
@@ -557,15 +542,3 @@ dep-ui-local:
|
||||
|
||||
start-test-k8s:
|
||||
go run ./hack/k8s
|
||||
|
||||
.PHONY: list
|
||||
list:
|
||||
@LC_ALL=C $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$'
|
||||
|
||||
.PHONY: applicationset-controller
|
||||
applicationset-controller:
|
||||
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
|
||||
|
||||
.PHONY: checksums
|
||||
checksums:
|
||||
sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt
|
||||
|
||||
8 OWNERS
@@ -8,22 +8,16 @@ approvers:
- jannfis
- jessesuen
- jgwest
- keithchong
- mayzhang2000
- rbreeze
- leoluz
- crenshaw-dev
- pasha-codefresh

reviewers:
- dthomson25
- tetchel
- terrytangyuan
- wtam2018
- ishitasequeira
- reginapizza
- hblixt
- chetan-rns
- wanghong230
- ciiay
- saumeya
- pasha-codefresh
11 Procfile
@@ -1,12 +1,9 @@
|
||||
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server go run ./cmd/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} "
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:7.0.0-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:6.2.6-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
|
||||
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-/tmp/argo-e2e/app/config/plugin} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} go run ./cmd/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
git-server: test/fixture/testrepos/start-git.sh
|
||||
helm-registry: test/fixture/testrepos/start-helm-registry.sh
|
||||
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
|
||||
|
||||
14 README.md
@@ -1,4 +1,8 @@
|
||||
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [](https://argoproj.github.io/community/join-slack) [](https://codecov.io/gh/argoproj/argo-cd) [](https://github.com/argoproj/argo-cd/releases/latest) [](https://bestpractices.coreinfrastructure.org/projects/4486) [](https://twitter.com/argoproj)
|
||||
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
|
||||
[](https://argoproj.github.io/community/join-slack)
|
||||
[](https://codecov.io/gh/argoproj/argo-cd)
|
||||
[](https://github.com/argoproj/argo-cd/releases/latest)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4486)
|
||||
|
||||
# Argo CD - Declarative Continuous Delivery for Kubernetes
|
||||
|
||||
@@ -32,8 +36,8 @@ Check live demo at https://cd.apps.argoproj.io/.
|
||||
|
||||
* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)
|
||||
* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)
|
||||
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
|
||||
* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
|
||||
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
|
||||
* User Community meeting: [Every other Wednesday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
|
||||
|
||||
|
||||
Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
|
||||
@@ -42,7 +46,6 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
|
||||
### Blogs and Presentations
|
||||
|
||||
1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
|
||||
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://blog.akuity.io/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argo-cd-7c5b4057ee49)
|
||||
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
|
||||
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
|
||||
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)
|
||||
@@ -69,6 +72,3 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
|
||||
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
|
||||
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
|
||||
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
|
||||
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
|
||||
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
|
||||
|
||||
|
||||
22 SECURITY.md
@@ -1,12 +1,12 @@
|
||||
# Security Policy for Argo CD
|
||||
|
||||
Version: **v1.4 (2022-01-23)**
|
||||
Version: **v1.2 (2020-08-07)**
|
||||
|
||||
## Preface
|
||||
|
||||
As a deployment tool, Argo CD needs to have production access which makes
|
||||
security a very important topic. The Argoproj team takes security very
|
||||
seriously and is continuously working on improving it.
|
||||
seriously and is continuously working on improving it.
|
||||
|
||||
## A word about security scanners
|
||||
|
||||
@@ -26,12 +26,8 @@ are well aware of the issues that may affect Argo CD and are constantly
|
||||
working on the remediation of those that affect Argo CD and our users.
|
||||
|
||||
If you believe that we might have missed an issue that we should take a look
|
||||
at (that can happen), then please discuss it with us. If there is a CVE
|
||||
assigned to the issue, please do open an issue on our GitHub tracker instead
|
||||
of writing to the security contact e-mail, since things reported by scanners
|
||||
are public already and the discussion that might emerge is of benefit to the
|
||||
general community. However, please validate your scanner results and its
|
||||
impact on Argo CD before opening an issue at least roughly.
|
||||
at (that can happen), then please discuss it with us. But please, do validate
|
||||
that assumption before at least roughly.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
@@ -60,17 +56,11 @@ We will do our best to react quickly on your inquiry, and to coordinate a fix
|
||||
and disclosure with you. Sometimes, it might take a little longer for us to
|
||||
react (e.g. out of office conditions), so please bear with us in these cases.
|
||||
|
||||
We will publish security advisories using the
|
||||
We will publish security advisiories using the
|
||||
[Git Hub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
|
||||
feature to keep our community well informed, and will credit you for your
|
||||
findings (unless you prefer to stay anonymous, of course).
|
||||
|
||||
Please report vulnerabilities by e-mail to the following address:
|
||||
Please report vulnerabilities by e-mail to the following address:
|
||||
|
||||
* cncf-argo-security@lists.cncf.io
|
||||
|
||||
## Securing your Argo CD Instance
|
||||
|
||||
See the [operator manual security page](docs/operator-manual/security.md) for
|
||||
additional information about Argo CD's security features and how to make your
|
||||
Argo CD production ready.
|
||||
|
||||
84 USERS.md
@@ -11,10 +11,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Adventure](https://jp.adventurekk.com/)
|
||||
1. [Akuity](https://akuity.io/)
|
||||
1. [Alibaba Group](https://www.alibabagroup.com/)
|
||||
1. [Allianz Direct](https://www.allianzdirect.de/)
|
||||
1. [Ambassador Labs](https://www.getambassador.io/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [Ant Group](https://www.antgroup.com/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [AppDirect](https://www.appdirect.com)
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
|
||||
@@ -23,32 +22,23 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
|
||||
1. [Beat](https://thebeat.co/en/)
|
||||
1. [Beez Innovation Labs](https://www.beezlabs.com/)
|
||||
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BMW Group](https://www.bmwgroup.com/)
|
||||
1. [Boozt](https://www.booztgroup.com/)
|
||||
1. [Boticario](https://www.boticario.com.br/)
|
||||
1. [Camptocamp](https://camptocamp.com)
|
||||
1. [Capital One](https://www.capitalone.com)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [Casavo](https://casavo.com)
|
||||
1. [Celonis](https://www.celonis.com/)
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Cobalt](https://www.cobalt.io/)
|
||||
1. [Codefresh](https://www.codefresh.io/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [CROZ d.o.o.](https://croz.net/)
|
||||
1. [Crédit Agricole CIB](https://www.ca-cib.com)
|
||||
1. [CROZ d.o.o.](https://croz.net/)
|
||||
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
|
||||
1. [Cybozu](https://cybozu-global.com)
|
||||
1. [Chargetrip](https://chargetrip.com)
|
||||
1. [D2iQ](https://www.d2iq.com)
|
||||
1. [Datarisk](https://www.datarisk.io/)
|
||||
1. [Deloitte](https://www.deloitte.com/)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
|
||||
1. [EDF Renewables](https://www.edf-re.com/)
|
||||
1. [edX](https://edx.org)
|
||||
@@ -56,58 +46,42 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Elium](https://www.elium.com)
|
||||
1. [END.](https://www.endclothing.com/)
|
||||
1. [Energisme](https://energisme.com/)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Flip](https://flip.id)
|
||||
1. [Fonoa](https://www.fonoa.com/)
|
||||
1. [freee](https://corp.freee.co.jp/en/company/)
|
||||
1. [Future PLC](https://www.futureplc.com/)
|
||||
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
|
||||
1. [Garner](https://www.garnercorp.com)
|
||||
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
|
||||
1. [Generali Deutschland AG](https://www.generali.de/)
|
||||
1. [Gitpod](https://www.gitpod.io)
|
||||
1. [Gllue](https://gllue.com)
|
||||
1. [Glovo](https://www.glovoapp.com)
|
||||
1. [GMETRI](https://gmetri.com/)
|
||||
1. [Gojek](https://www.gojek.io/)
|
||||
1. [Greenpass](https://www.greenpass.com.br/)
|
||||
1. [Handelsbanken](https://www.handelsbanken.se)
|
||||
1. [Healy](https://www.healyworld.net)
|
||||
1. [Helio](https://helio.exchange)
|
||||
1. [hipages](https://hipages.com.au/)
|
||||
1. [Hiya](https://hiya.com)
|
||||
1. [Honestbank](https://honestbank.com)
|
||||
1. [IBM](https://www.ibm.com/)
|
||||
1. [Ibotta](https://home.ibotta.com)
|
||||
1. [IITS-Consulting](https://iits-consulting.de)
|
||||
1. [imaware](https://imaware.health)
|
||||
1. [Index Exchange](https://www.indexexchange.com/)
|
||||
1. [InsideBoard](https://www.insideboard.com)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [Joblift](https://joblift.com/)
|
||||
1. [JovianX](https://www.jovianx.com/)
|
||||
1. [Kaltura](https://corp.kaltura.com/)
|
||||
1. [KarrotPay](https://www.daangnpay.com/)
|
||||
1. [Karrot](https://www.daangn.com/)
|
||||
1. [KarrotPay](https://www.daangnpay.com/)
|
||||
1. [Kasa](https://kasa.co.kr/)
|
||||
1. [Keeeb](https://www.keeeb.com/)
|
||||
1. [Keptn](https://keptn.sh)
|
||||
1. [Kinguin](https://www.kinguin.net/)
|
||||
1. [KintoHub](https://www.kintohub.com/)
|
||||
1. [KompiTech GmbH](https://www.kompitech.com/)
|
||||
1. [KubeSphere](https://github.com/kubesphere)
|
||||
1. [LexisNexis](https://www.lexisnexis.com/)
|
||||
1. [Lightricks](https://www.lightricks.com/)
|
||||
1. [LINE](https://linecorp.com/en/)
|
||||
1. [Lytt](https://www.lytt.co/)
|
||||
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
|
||||
1. [Major League Baseball](https://mlb.com)
|
||||
1. [Mambu](https://www.mambu.com/)
|
||||
1. [MariaDB](https://mariadb.com)
|
||||
1. [Mattermost](https://www.mattermost.com)
|
||||
1. [Max Kelsen](https://www.maxkelsen.com/)
|
||||
1. [MeDirect](https://medirect.com.mt/)
|
||||
1. [Metanet](http://www.metanet.co.kr/en/)
|
||||
1. [MindSpore](https://mindspore.cn)
|
||||
1. [Mirantis](https://mirantis.com/)
|
||||
1. [mixi Group](https://mixi.co.jp/)
|
||||
@@ -119,9 +93,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [New Relic](https://newrelic.com/)
|
||||
1. [Nextdoor](https://nextdoor.com/)
|
||||
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
|
||||
1. [Nitro](https://gonitro.com)
|
||||
1. [Octadesk](https://octadesk.com)
|
||||
1. [omegaUp](https://omegaUp.com)
|
||||
1. [openEuler](https://openeuler.org)
|
||||
1. [openGauss](https://opengauss.org/)
|
||||
1. [openLooKeng](https://openlookeng.io)
|
||||
@@ -129,7 +101,6 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Opensurvey](https://www.opensurvey.co.kr/)
|
||||
1. [Optoro](https://www.optoro.com/)
|
||||
1. [Orbital Insight](https://orbitalinsight.com/)
|
||||
1. [p3r](https://www.p3r.one/)
|
||||
1. [Packlink](https://www.packlink.com/)
|
||||
1. [PayPay](https://paypay.ne.jp/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
@@ -143,34 +114,21 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Quipper](https://www.quipper.com/)
|
||||
1. [Recreation.gov](https://www.recreation.gov/)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
1. [Riskified](https://www.riskified.com/)
|
||||
1. [Robotinfra](https://www.robotinfra.com)
|
||||
1. [Rubin Observatory](https://www.lsst.org)
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [Schwarz IT](https://jobs.schwarz/it-mission)
|
||||
1. [Skit](https://skit.ai/)
|
||||
1. [Skyscanner](https://www.skyscanner.net/)
|
||||
1. [Smilee.io](https://smilee.io)
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Snyk](https://snyk.io/)
|
||||
1. [Speee](https://speee.jp/)
|
||||
1. [Spendesk](https://spendesk.com/)
|
||||
1. [Spores Labs](https://spores.app)
|
||||
1. [Stuart](https://stuart.com/)
|
||||
1. [Sumo Logic](https://sumologic.com/)
|
||||
1. [Sutpc](http://www.sutpc.com/)
|
||||
1. [Swiss Post](https://github.com/swisspost)
|
||||
1. [Swisscom](https://www.swisscom.ch)
|
||||
1. [Swissquote](https://github.com/swissquote)
|
||||
1. [Syncier](https://syncier.com/)
|
||||
1. [TableCheck](https://tablecheck.com/)
|
||||
1. [Tailor Brands](https://www.tailorbrands.com)
|
||||
1. [Tamkeen Technologies](https://tamkeentech.sa/)
|
||||
1. [Technacy](https://www.technacy.it/)
|
||||
1. [Tesla](https://tesla.com/)
|
||||
1. [ThousandEyes](https://www.thousandeyes.com/)
|
||||
1. [Ticketmaster](https://ticketmaster.com)
|
||||
@@ -180,10 +138,9 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [tru.ID](https://tru.id)
|
||||
1. [Twilio SendGrid](https://sendgrid.com)
|
||||
1. [tZERO](https://www.tzero.com/)
|
||||
1. [ungleich.ch](https://ungleich.ch/)
|
||||
1. [UBIO](https://ub.io/)
|
||||
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
|
||||
1. [ungleich.ch](https://ungleich.ch/)
|
||||
1. [Unifonic Inc](https://www.unifonic.com/)
|
||||
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
|
||||
1. [Viaduct](https://www.viaduct.ai/)
|
||||
1. [Virtuo](https://www.govirtuo.com/)
|
||||
@@ -191,18 +148,33 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Volvo Cars](https://www.volvocars.com/)
|
||||
1. [VSHN - The DevOps Company](https://vshn.ch/)
|
||||
1. [Walkbase](https://www.walkbase.com/)
|
||||
1. [Webstores](https://www.webstores.nl)
|
||||
1. [Wehkamp](https://www.wehkamp.nl/)
|
||||
1. [WeMo Scooter](https://www.wemoscooter.com/)
|
||||
1. [Webstores](https://www.webstores.nl)
|
||||
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
|
||||
1. [Witick](https://witick.io/)
|
||||
1. [WooliesX](https://wooliesx.com.au/)
|
||||
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
|
||||
1. [WSpot](https://www.wspot.com.br/)
|
||||
1. [Yieldlab](https://www.yieldlab.de/)
|
||||
1. [Youverify](https://youverify.co/)
|
||||
1. [Yubo](https://www.yubo.live/)
|
||||
1. [Zimpler](https://www.zimpler.com/)
|
||||
1. [ZOZO](https://corp.zozo.com/)
|
||||
1. [Trendyol](https://www.trendyol.com/)
|
||||
1. [RapidAPI](https://www.rapidapi.com/)
|
||||
1. [Sap Labs](http://sap.com)
|
||||
1. [Smilee.io](https://smilee.io)
|
||||
1. [Metanet](http://www.metanet.co.kr/en/)
|
||||
1. [Unifonic Inc](https://www.unifonic.com/)
|
||||
1. [Tamkeen Technologies](https://tamkeentech.sa/)
|
||||
1. [Kaltura](https://corp.kaltura.com/)
|
||||
1. [Boticario](https://www.boticario.com.br/)
|
||||
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
|
||||
1. [MariaDB](https://mariadb.com)
|
||||
1. [Lightricks](https://www.lightricks.com/)
|
||||
1. [RightRev](https://rightrev.com/)
|
||||
1. [MeDirect](https://medirect.com.mt/)
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Technacy](https://www.technacy.it/)
|
||||
1. [freee](https://corp.freee.co.jp/en/company/)
|
||||
1. [Youverify](https://youverify.co/)
|
||||
1. [Keeeb](https://www.keeeb.com/)
|
||||
1. [p3r](https://www.p3r.one/)
|
||||
1. [Faro](https://www.faro.com/)
|
||||
1. [Rise](https://www.risecard.eu/)
|
||||
|
||||
@@ -1,732 +0,0 @@
|
||||
/*
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
log "github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
)
|
||||
|
||||
const (
|
||||
// Rather than importing the whole argocd-notifications controller, just copying the const here
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/subscriptions.go#L12
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
|
||||
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
|
||||
ReconcileRequeueOnValidationError = time.Minute * 3
|
||||
)
|
||||
|
||||
var (
|
||||
preservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
}
|
||||
)
|
||||
|
||||
// ApplicationSetReconciler reconciles a ApplicationSet object
|
||||
type ApplicationSetReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
Generators map[string]generators.Generator
|
||||
ArgoDB db.ArgoDB
|
||||
ArgoAppClientset appclientset.Interface
|
||||
KubeClientset kubernetes.Interface
|
||||
utils.Policy
|
||||
utils.Renderer
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch
|
||||
|
||||
func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = r.Log.WithValues("applicationset", req.NamespacedName)
|
||||
_ = log.WithField("applicationset", req.NamespacedName)
|
||||
|
||||
var applicationSetInfo argoprojiov1alpha1.ApplicationSet
|
||||
parametersGenerated := false
|
||||
|
||||
if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
log.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err)
|
||||
}
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
|
||||
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Log a warning if there are unrecognized generators
|
||||
utils.CheckInvalidGenerators(&applicationSetInfo)
|
||||
// desiredApplications is the main list of all expected Applications from all generators in this appset.
|
||||
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: string(applicationSetReason),
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
parametersGenerated = true
|
||||
|
||||
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo, req.Namespace)
|
||||
if err != nil {
|
||||
// While some generators may return an error that requires user intervention,
|
||||
// other generators reference external resources that may change to cause
|
||||
// the error to no longer occur. We thus log the error and requeue
|
||||
// with a timeout to give this another shot at a later time.
|
||||
//
|
||||
// Changes to watched resources will cause this to be reconciled sooner than
|
||||
// the RequeueAfter time.
|
||||
log.Errorf("error occurred during application validation: %s", err.Error())
|
||||
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range desiredApplications {
|
||||
if validateErrors[i] == nil {
|
||||
validApps = append(validApps, desiredApplications[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(validateErrors) > 0 {
|
||||
var message string
|
||||
for _, v := range validateErrors {
|
||||
message = v.Error()
|
||||
log.Errorf("validation error found during application validation: %s", message)
|
||||
}
|
||||
if len(validateErrors) > 1 {
|
||||
// Only the last message gets added to the appset status, to keep the size reasonable.
|
||||
message = fmt.Sprintf("%s (and %d more)", message, len(validateErrors)-1)
|
||||
}
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: message,
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
}
|
||||
|
||||
if r.Policy.Update() {
|
||||
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonUpdateApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else {
|
||||
err = r.createInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonCreateApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if r.Policy.Delete() {
|
||||
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonDeleteApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if applicationSetInfo.RefreshRequired() {
|
||||
delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
|
||||
err := r.Client.Update(ctx, &applicationSetInfo)
|
||||
if err != nil {
|
||||
log.Warnf("error occurred while updating ApplicationSet: %v", err)
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonRefreshApplicationError,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
|
||||
log.WithField("requeueAfter", requeueAfter).Info("end reconcile")
|
||||
|
||||
if len(validateErrors) == 0 {
|
||||
if err := r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{
|
||||
RequeueAfter: requeueAfter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getParametersGeneratedCondition(parametersGenerated bool, message string) argoprojiov1alpha1.ApplicationSetCondition {
|
||||
var paramtersGeneratedCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
if parametersGenerated {
|
||||
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonParametersGenerated,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
} else {
|
||||
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: message,
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonErrorOccurred,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
return paramtersGeneratedCondition
|
||||
}
|
||||
|
||||
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argoprojiov1alpha1.ApplicationSetCondition {
|
||||
var resourceUpToDateCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
if errorOccurred {
|
||||
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: message,
|
||||
Reason: reason,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
} else {
|
||||
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "ApplicationSet up to date",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
}
|
||||
return resourceUpToDateCondition
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argoprojiov1alpha1.ApplicationSet, condition argoprojiov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
|
||||
// check if error occurred during reconcile process
|
||||
errOccurred := condition.Type == argoprojiov1alpha1.ApplicationSetConditionErrorOccurred
|
||||
|
||||
var errOccurredCondition argoprojiov1alpha1.ApplicationSetCondition
|
||||
|
||||
if errOccurred {
|
||||
errOccurredCondition = condition
|
||||
} else {
|
||||
errOccurredCondition = argoprojiov1alpha1.ApplicationSetCondition{
|
||||
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
|
||||
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
|
||||
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
|
||||
|
||||
newConditions := []argoprojiov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
|
||||
|
||||
needToUpdateConditions := false
|
||||
for _, condition := range newConditions {
|
||||
// only mark the conditions for update if an existing condition of the same type differs from the new one
|
||||
for _, c := range applicationSet.Status.Conditions {
|
||||
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
|
||||
needToUpdateConditions = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
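// evaluatedTypes marks the condition types evaluated in this reconcile pass; SetConditions only overwrites conditions whose type is listed here.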
evaluatedTypes := map[argoprojiov1alpha1.ApplicationSetConditionType]bool{
|
||||
argoprojiov1alpha1.ApplicationSetConditionErrorOccurred: true,
|
||||
argoprojiov1alpha1.ApplicationSetConditionParametersGenerated: true,
|
||||
argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
|
||||
}
|
||||
|
||||
if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 {
|
||||
// fetch updated Application Set object before updating it
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
|
||||
if client.IgnoreNotFound(err) == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %v", err)
|
||||
}
|
||||
|
||||
applicationSet.Status.SetConditions(
|
||||
newConditions, evaluatedTypes,
|
||||
)
|
||||
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, applicationSet)
|
||||
if err != nil && !apierr.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
|
||||
// generated applications.
|
||||
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argoprojiov1alpha1.ApplicationSet, namespace string) (map[int]error, error) {
|
||||
errorsByIndex := map[int]error{}
|
||||
namesSet := map[string]bool{}
|
||||
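// namesSet tracks the application names seen so far, so duplicates produced across generators can be reported per application.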
for i, app := range desiredApplications {
|
||||
|
||||
if !namesSet[app.Name] {
|
||||
namesSet[app.Name] = true
|
||||
} else {
|
||||
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, namespace); err != nil {
|
||||
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(conditions) > 0 {
|
||||
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return errorsByIndex, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argoprojiov1alpha1.ApplicationSet) time.Duration {
|
||||
var res time.Duration
|
||||
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
|
||||
|
||||
relevantGenerators := generators.GetRelevantGenerators(&requestedGenerator, r.Generators)
|
||||
|
||||
for _, g := range relevantGenerators {
|
||||
t := g.GetRequeueAfter(&requestedGenerator)
|
||||
|
||||
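// Keep the smallest non-zero requeue interval reported by any relevant generator.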
if res == 0 {
|
||||
res = t
|
||||
} else if t != 0 && t < res {
|
||||
res = t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func getTempApplication(applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) *argov1alpha1.Application {
|
||||
var tmplApplication argov1alpha1.Application
|
||||
tmplApplication.Annotations = applicationSetTemplate.Annotations
|
||||
tmplApplication.Labels = applicationSetTemplate.Labels
|
||||
tmplApplication.Namespace = applicationSetTemplate.Namespace
|
||||
tmplApplication.Name = applicationSetTemplate.Name
|
||||
tmplApplication.Spec = applicationSetTemplate.Spec
|
||||
tmplApplication.Finalizers = applicationSetTemplate.Finalizers
|
||||
|
||||
return &tmplApplication
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argoprojiov1alpha1.ApplicationSetReasonType, error) {
|
||||
var res []argov1alpha1.Application
|
||||
|
||||
var firstError error
|
||||
var applicationSetReason argoprojiov1alpha1.ApplicationSetReasonType
|
||||
|
||||
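// Each configured generator is transformed into (template, params) pairs; a failing generator is logged and skipped so the remaining generators can still produce applications.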
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
|
||||
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonApplicationParamsGenerationError
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for _, a := range t {
|
||||
tmplApplication := getTempApplication(a.Template)
|
||||
|
||||
for _, p := range a.Params {
|
||||
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
|
||||
Error("error generating application from params")
|
||||
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonRenderTemplateParamsError
|
||||
}
|
||||
continue
|
||||
}
|
||||
res = append(res, *app)
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
|
||||
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
|
||||
}
|
||||
|
||||
return res, applicationSetReason, firstError
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
|
||||
// grab the Application object, extract the owner...
|
||||
app := rawObj.(*argov1alpha1.Application)
|
||||
owner := metav1.GetControllerOf(app)
|
||||
if owner == nil {
|
||||
return nil
|
||||
}
|
||||
// ...make sure it's an ApplicationSet...
|
||||
if owner.APIVersion != argoprojiov1alpha1.GroupVersion.String() || owner.Kind != "ApplicationSet" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ...and if so, return it
|
||||
return []string{owner.Name}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&argoprojiov1alpha1.ApplicationSet{}).
|
||||
Owns(&argov1alpha1.Application{}).
|
||||
Watches(
|
||||
&source.Kind{Type: &corev1.Secret{}},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
}).
|
||||
// TODO: also watch Applications and respond on changes if we own them.
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
// createOrUpdateInCluster will create / update application resources in the cluster.
|
||||
// - For new applications, it will call create
|
||||
// - For existing applications, it will call update
|
||||
// The function also adds an owner reference to all applications; the owner reference is later used to delete them.
|
||||
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
|
||||
var firstError error
|
||||
// Create or update each application in desiredApplications
|
||||
for _, generatedApp := range desiredApplications {
|
||||
|
||||
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
|
||||
generatedApp.Namespace = applicationSet.Namespace
|
||||
|
||||
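// Only the identifying fields are set here; utils.CreateOrUpdate looks up the existing Application (if any) and then applies the mutation function below before creating or updating it.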
found := &argov1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: generatedApp.Name,
|
||||
Namespace: generatedApp.Namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
}
|
||||
|
||||
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
|
||||
// Copy only the Application/ObjectMeta fields that are significant from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
// Preserve specially treated argo cd annotations:
|
||||
// * https://github.com/argoproj/applicationset/issues/180
|
||||
// * https://github.com/argoproj/argo-cd/issues/10500
|
||||
for _, key := range preservedAnnotations {
|
||||
if state, exists := found.ObjectMeta.Annotations[key]; exists {
|
||||
if generatedApp.Annotations == nil {
|
||||
generatedApp.Annotations = map[string]string{}
|
||||
}
|
||||
generatedApp.Annotations[key] = state
|
||||
}
|
||||
}
|
||||
found.ObjectMeta.Annotations = generatedApp.Annotations
|
||||
|
||||
found.ObjectMeta.Finalizers = generatedApp.Finalizers
|
||||
found.ObjectMeta.Labels = generatedApp.Labels
|
||||
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
|
||||
appLog.Logf(log.InfoLevel, "%s Application", action)
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
|
||||
// createInCluster will filter from the desiredApplications only the applications that need to be created
|
||||
// Then it will call createOrUpdateInCluster to do the actual create
|
||||
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
|
||||
var createApps []argov1alpha1.Application
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will hold the app names that are currently in the cluster
|
||||
|
||||
for _, app := range current {
|
||||
m[app.Name] = true
|
||||
}
|
||||
|
||||
// keep only the applications that are not yet in the cluster (i.e. not present in m)
|
||||
for _, app := range desiredApplications {
|
||||
_, exists := m[app.Name]
|
||||
|
||||
if !exists {
|
||||
createApps = append(createApps, app)
|
||||
}
|
||||
}
|
||||
|
||||
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
|
||||
// TODO: Should this use the context param?
|
||||
var current argov1alpha1.ApplicationList
|
||||
err := r.Client.List(context.Background(), ¤t, client.MatchingFields{".metadata.controller": applicationSet.Name})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return current.Items, nil
|
||||
}
|
||||
|
||||
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
|
||||
// The function must be called after all generators have been called and the desired applications have been generated
|
||||
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
|
||||
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
|
||||
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
|
||||
// clusterList, err := argoDB.ListClusters(ctx)
|
||||
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, applicationSet.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save current applications to be able to delete the ones that are not in appList
|
||||
current, err := r.getCurrentApplications(ctx, applicationSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will hold the desired app names; used to decide which existing apps to delete
|
||||
|
||||
for _, app := range desiredApplications {
|
||||
m[app.Name] = true
|
||||
}
|
||||
|
||||
// Delete apps that are not in m[string]bool
|
||||
var firstError error
|
||||
for _, app := range current {
|
||||
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
|
||||
_, exists := m[app.Name]
|
||||
|
||||
if !exists {
|
||||
|
||||
// Removes the Argo CD resources finalizer if the application contains an invalid target (e.g. a missing cluster)
|
||||
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
|
||||
if err != nil {
|
||||
appLog.WithError(err).Error("failed to update Application")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = r.Client.Delete(ctx, &app)
|
||||
if err != nil {
|
||||
appLog.WithError(err).Error("failed to delete Application")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
|
||||
appLog.Log(log.InfoLevel, "Deleted application")
|
||||
}
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
|
||||
// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (e.g. a missing cluster)
|
||||
func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, app *argov1alpha1.Application, clusterList *argov1alpha1.ClusterList, appLog *log.Entry) error {
|
||||
|
||||
// Only check if the finalizers need to be removed IF there are finalizers to remove
|
||||
if len(app.Finalizers) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var validDestination bool
|
||||
|
||||
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
|
||||
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, applicationSet.Namespace); err != nil {
|
||||
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
|
||||
validDestination = false
|
||||
} else {
|
||||
|
||||
// Detect if the destination's server field does not match an existing cluster
|
||||
|
||||
matchingCluster := false
|
||||
for _, cluster := range clusterList.Items {
|
||||
|
||||
// Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable.
|
||||
if app.Spec.Destination.Server != cluster.Server {
|
||||
continue
|
||||
}
|
||||
|
||||
// The name must match, if it is not empty
|
||||
if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
matchingCluster = true
|
||||
break
|
||||
}
|
||||
|
||||
if !matchingCluster {
|
||||
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
|
||||
}
|
||||
|
||||
validDestination = matchingCluster
|
||||
}
|
||||
// If the destination is invalid (for example the cluster is no longer defined), then remove
|
||||
// the application finalizers to avoid triggering Argo CD bug #5817
|
||||
if !validDestination {
|
||||
|
||||
// Filter out the Argo CD finalizer from the finalizer list
|
||||
var newFinalizers []string
|
||||
for _, existingFinalizer := range app.Finalizers {
|
||||
if existingFinalizer != argov1alpha1.ResourcesFinalizerName { // only remove this one
|
||||
newFinalizers = append(newFinalizers, existingFinalizer)
|
||||
}
|
||||
}
|
||||
|
||||
// If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app
|
||||
if len(newFinalizers) != len(app.Finalizers) {
|
||||
app.Finalizers = newFinalizers
|
||||
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name)
|
||||
appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination")
|
||||
|
||||
err := r.Client.Update(ctx, app, &client.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ handler.EventHandler = &clusterSecretEventHandler{}
|
||||
File diff suppressed because it is too large
@@ -1,84 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// clusterSecretEventHandler is used when watching Secrets to check if they are ArgoCD Cluster Secrets, and if so
|
||||
// requeue any related ApplicationSets.
|
||||
type clusterSecretEventHandler struct {
|
||||
//handler.EnqueueRequestForOwner
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.Object)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.ObjectNew)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.Object)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
|
||||
h.queueRelatedAppGenerators(q, e.Object)
|
||||
}
|
||||
|
||||
// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allowing us to easily mock
|
||||
// it for testing purposes.
|
||||
type addRateLimitingInterface interface {
|
||||
Add(item interface{})
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {
|
||||
|
||||
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
|
||||
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
|
||||
return
|
||||
}
|
||||
|
||||
h.Log.WithFields(log.Fields{
|
||||
"namespace": object.GetNamespace(),
|
||||
"name": object.GetName(),
|
||||
}).Info("processing event for cluster secret")
|
||||
|
||||
appSetList := &argoprojiov1alpha1.ApplicationSetList{}
|
||||
err := h.Client.List(context.Background(), appSetList)
|
||||
if err != nil {
|
||||
h.Log.WithError(err).Error("unable to list ApplicationSets")
|
||||
return
|
||||
}
|
||||
|
||||
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
|
||||
for _, appSet := range appSetList.Items {
|
||||
|
||||
foundClusterGenerator := false
|
||||
for _, generator := range appSet.Spec.Generators {
|
||||
if generator.Clusters != nil {
|
||||
foundClusterGenerator = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if foundClusterGenerator {
|
||||
|
||||
// TODO: only queue the AppGenerator if the labels match this cluster
|
||||
req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: appSet.Namespace, Name: appSet.Name}}
|
||||
q.Add(req)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,234 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestClusterEventHandler(t *testing.T) {
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := argoprojiov1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = argov1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
items []argoprojiov1alpha1.ApplicationSet
|
||||
secret corev1.Secret
|
||||
expectedRequests []ctrl.Request
|
||||
}{
|
||||
{
|
||||
name: "no application sets should mean no requests",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "a cluster generator should produce a request",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{{
|
||||
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "multiple cluster generators should produce multiple requests",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set2",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set2"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "non-cluster generator should not match",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "app-set-non-cluster",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argoprojiov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-non-argocd-secret",
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
|
||||
appSetList := argoprojiov1alpha1.ApplicationSetList{
|
||||
Items: test.items,
|
||||
}
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
|
||||
|
||||
handler := &clusterSecretEventHandler{
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
}
|
||||
|
||||
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
|
||||
|
||||
handler.queueRelatedAppGenerators(&mockAddRateLimitingInterface, &test.secret)
|
||||
|
||||
assert.False(t, mockAddRateLimitingInterface.errorOccurred)
|
||||
assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests)
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Add checks the type, and adds it to the internal list of received additions
|
||||
func (obj *mockAddRateLimitingInterface) Add(item interface{}) {
|
||||
if req, ok := item.(ctrl.Request); ok {
|
||||
obj.addedItems = append(obj.addedItems, req)
|
||||
} else {
|
||||
obj.errorOccurred = true
|
||||
}
|
||||
}
|
||||
|
||||
type mockAddRateLimitingInterface struct {
|
||||
errorOccurred bool
|
||||
addedItems []ctrl.Request
|
||||
}
|
||||
@@ -1,180 +0,0 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestRequeueAfter(t *testing.T) {
|
||||
mockServer := argoCDServiceMock{}
|
||||
ctx := context.Background()
|
||||
scheme := runtime.NewScheme()
|
||||
err := argoappsetv1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
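// Register the list kind for the fake duck-type resource so the fake dynamic client can serve List requests for it.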
gvrToListKind := map[schema.GroupVersionResource]string{{
|
||||
Group: "mallard.io",
|
||||
Version: "v1",
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
appClientset := kubefake.NewSimpleClientset()
|
||||
k8sClient := fake.NewClientBuilder().Build()
|
||||
duckType := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v2quack",
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "mightyduck",
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "all-species"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "staging-01",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
|
||||
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build()),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient),
|
||||
}
|
||||
|
||||
nestedGenerators := map[string]generators.Generator{
|
||||
"List": terminalGenerators["List"],
|
||||
"Clusters": terminalGenerators["Clusters"],
|
||||
"Git": terminalGenerators["Git"],
|
||||
"SCMProvider": terminalGenerators["SCMProvider"],
|
||||
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
|
||||
"PullRequest": terminalGenerators["PullRequest"],
|
||||
"Matrix": generators.NewMatrixGenerator(terminalGenerators),
|
||||
"Merge": generators.NewMergeGenerator(terminalGenerators),
|
||||
}
|
||||
|
||||
topLevelGenerators := map[string]generators.Generator{
|
||||
"List": terminalGenerators["List"],
|
||||
"Clusters": terminalGenerators["Clusters"],
|
||||
"Git": terminalGenerators["Git"],
|
||||
"SCMProvider": terminalGenerators["SCMProvider"],
|
||||
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
|
||||
"PullRequest": terminalGenerators["PullRequest"],
|
||||
"Matrix": generators.NewMatrixGenerator(nestedGenerators),
|
||||
"Merge": generators.NewMergeGenerator(nestedGenerators),
|
||||
}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(0),
|
||||
Generators: topLevelGenerators,
|
||||
}
|
||||
|
||||
type args struct {
|
||||
appset *argoappsetv1alpha1.ApplicationSet
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want time.Duration
|
||||
wantErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{name: "Cluster", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{Clusters: &argoappsetv1alpha1.ClusterGenerator{}}},
|
||||
},
|
||||
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
|
||||
{name: "ClusterMergeNested", args: args{&argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
|
||||
{Merge: &argoappsetv1alpha1.MergeGenerator{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
|
||||
Git: &argoappsetv1alpha1.GitGenerator{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
|
||||
{name: "ClusterMatrixNested", args: args{&argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
|
||||
{Matrix: &argoappsetv1alpha1.MatrixGenerator{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
|
||||
Git: &argoappsetv1alpha1.GitGenerator{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
|
||||
{name: "ListGenerator", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
|
||||
Spec: argoappsetv1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{List: &argoappsetv1alpha1.ListGenerator{}}},
|
||||
},
|
||||
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- clusters: {}
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: HEAD
|
||||
path: guestbook
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,57 +0,0 @@
|
||||
# How the Cluster Decision Resource generator works for clusterDecisionResource
1. The Cluster Decision Resource generator reads a configurable status format:
```yaml
status:
  clusters:
    - name: cluster-01
    - name: cluster-02
```
This is a common status format. Another format that could be read looks like this:
```yaml
status:
  decisions:
    - clusterName: cluster-01
      namespace: cluster-01
    - clusterName: cluster-02
      namespace: cluster-02
```
2. Any resource that has a list of key/value pairs, where the values match ArgoCD cluster names, can be used.
3. The key/value pairs found in each element of the list will be available to the template. In addition, `name` and `server` will still be available to the template.
4. The Service Account used by the ApplicationSet controller must have access to `Get` the resource you want to retrieve the duck type definition from.
5. A ConfigMap is used to identify the resource from which the status of the generated ArgoCD clusters is read. You can use multiple resources by creating a ConfigMap for each one in the ArgoCD namespace.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-configmap
data:
  apiVersion: group.io/v1
  kind: mykinds
  statusListKey: clusters
  matchKey: name
```
* `apiVersion` - This is the apiVersion of your resource.
* `kind` - This is the plural kind of your resource.
* `statusListKey` - Defaults to 'clusters'; this is the key found in your resource's status that holds the list of ArgoCD clusters.
* `matchKey` - The key name found in the cluster list; `name` and `clusterName` are the keys used in the examples above. A sketch of how these two settings could be read is shown below.

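The actual implementation lives in the DuckType generator under `applicationset/generators`; the snippet below is only a minimal, self-contained sketch of how the `statusListKey` and `matchKey` settings from the ConfigMap could be used to read a duck-typed status like the ones shown above. The function and variable names (`extractClusterValues`, `duck`) are illustrative, not part of the codebase.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// extractClusterValues walks the duck-typed resource's status using the two
// ConfigMap settings: statusListKey selects the list under .status, and
// matchKey names the field inside each list element that holds the cluster name.
func extractClusterValues(duck *unstructured.Unstructured, statusListKey, matchKey string) ([]map[string]string, error) {
	items, found, err := unstructured.NestedSlice(duck.Object, "status", statusListKey)
	if err != nil || !found {
		return nil, fmt.Errorf("status.%s not found: %v", statusListKey, err)
	}

	var params []map[string]string
	for _, item := range items {
		element, ok := item.(map[string]interface{})
		if !ok {
			continue
		}
		// Every string key/value pair in the element becomes a template parameter,
		// provided the matchKey value is present (it should name an ArgoCD cluster).
		p := map[string]string{}
		for k, v := range element {
			if s, ok := v.(string); ok {
				p[k] = s
			}
		}
		if p[matchKey] == "" {
			continue
		}
		params = append(params, p)
	}
	return params, nil
}

func main() {
	duck := &unstructured.Unstructured{Object: map[string]interface{}{
		"status": map[string]interface{}{
			"decisions": []interface{}{
				map[string]interface{}{"clusterName": "cluster-01", "namespace": "cluster-01"},
				map[string]interface{}{"clusterName": "cluster-02", "namespace": "cluster-02"},
			},
		},
	}}
	params, err := extractClusterValues(duck, "decisions", "clusterName")
	fmt.Println(params, err)
}
```
In the real generator the same two settings come from the ConfigMap named by the generator's `configMapRef`, and the extracted key/value pairs are then supplied as parameters to the ApplicationSet template.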
# Applying the example
1. Connect to a cluster with the ApplicationSet controller running.
2. Edit the Role for the ApplicationSet service account, and grant it permission to `list` the `placementdecisions` resources from the apiGroup `cluster.open-cluster-management.io/v1alpha1`:
```yaml
- apiGroups:
  - "cluster.open-cluster-management.io/v1alpha1"
  resources:
  - placementdecisions
  verbs:
  - list
```
3. Apply the following controller and the associated ManagedCluster CRDs:
https://github.com/open-cluster-management/placement
4. Now apply the PlacementDecision and an ApplicationSet:
```bash
kubectl apply -f ./placementdecision.yaml
kubectl apply -f ./configMap.yaml
kubectl apply -f ./ducktype-example.yaml
```
5. For now this won't do anything until you create a controller that populates the `Status.Decisions` array.
@@ -1,11 +0,0 @@
|
||||
# Generating a Status.Decisions from this CRD requires https://github.com/open-cluster-management/multicloud-operators-placementrule to be deployed
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: ocm-placement
|
||||
data:
|
||||
apiVersion: apps.open-cluster-management.io/v1
|
||||
kind: placementrules
|
||||
statusListKey: decisions
|
||||
matchKey: clusterName
|
||||
@@ -1,27 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: book-import
|
||||
spec:
|
||||
generators:
|
||||
- clusterDecisionResource:
|
||||
configMapRef: ocm-placement
|
||||
name: test-placement
|
||||
requeueAfterSeconds: 30
|
||||
template:
|
||||
metadata:
|
||||
name: '{{clusterName}}-book-import'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: https://github.com/open-cluster-management/application-samples.git
|
||||
targetRevision: HEAD
|
||||
path: book-import
|
||||
destination:
|
||||
name: '{{clusterName}}'
|
||||
namespace: bookimport
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
@@ -1,18 +0,0 @@
|
||||
---
|
||||
apiVersion: apps.open-cluster-management.io/v1
|
||||
kind: PlacementRule
|
||||
metadata:
|
||||
name: test-placement
|
||||
spec:
|
||||
clusterReplicas: 1 # Availability choice, maximum number of clusters to provision at once
|
||||
clusterSelector:
|
||||
matchLabels:
|
||||
'usage': 'development'
|
||||
clusterConditions:
|
||||
- type: ManagedClusterConditionAvailable
|
||||
status: "True"
|
||||
# Below is sample output the generator can consume.
|
||||
status:
|
||||
decisions:
|
||||
- clusterName: cluster-01
|
||||
- clusterName: cluster-02
|
||||
@@ -1,22 +0,0 @@
|
||||
# This is an example of a typical ApplicationSet which uses the cluster generator.
|
||||
# An ApplicationSet is comprised of two stanzas:
|
||||
#  - spec.generators - producer of a list of values supplied as arguments to an app template
|
||||
# - spec.template - an application template, which has been parameterized
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- clusters: {}
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
chart: guestbook
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,33 +0,0 @@
|
||||
# The cluster generator produces an items list from all clusters registered to Argo CD.
|
||||
# It automatically provides the following fields as values to the app template:
|
||||
# - name
|
||||
# - server
|
||||
# - metadata.labels.<key>
|
||||
# - metadata.annotations.<key>
|
||||
# - values.<key>
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
values:
|
||||
project: default
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
labels:
|
||||
environment: '{{metadata.labels.environment}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
chart: guestbook
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,44 +0,0 @@
|
||||
# This example demonstrates the git directory generator, which produces an items list
|
||||
# based on discovery of directories in a git repo matching a specified pattern.
|
||||
# Git generators automatically provide {{path}} and {{path.basename}} as available
|
||||
# variables to the app template.
|
||||
#
|
||||
# Suppose the following git directory structure (note the use of different config tools):
|
||||
#
|
||||
# cluster-deployments
|
||||
# └── add-ons
|
||||
# ├── argo-rollouts
|
||||
# │ ├── all.yaml
|
||||
# │ └── kustomization.yaml
|
||||
# ├── argo-workflows
|
||||
# │ └── install.yaml
|
||||
# ├── grafana
|
||||
# │ ├── Chart.yaml
|
||||
# │ └── values.yaml
|
||||
# └── prometheus-operator
|
||||
# ├── Chart.yaml
|
||||
# └── values.yaml
|
||||
#
|
||||
# The following ApplicationSet would produce four applications (in different namespaces),
|
||||
# using the directory basename as both the namespace and application name.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-addons
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
directories:
|
||||
- path: add-ons/*
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: http://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,55 +0,0 @@
|
||||
# This example demonstrates a git file generator which traverses the directory structure of a git
|
||||
# repository to discover items based on a filename convention. For each file discovered, the
|
||||
# contents of the discovered files themselves act as the set of inputs to the app template.
|
||||
#
|
||||
# Suppose the following git directory structure:
|
||||
#
|
||||
# cluster-deployments
|
||||
# ├── apps
|
||||
# │ └── guestbook
|
||||
# │ └── install.yaml
|
||||
# └── cluster-config
|
||||
# ├── engineering
|
||||
# │ ├── dev
|
||||
# │ │ └── config.json
|
||||
# │ └── prod
|
||||
# │ └── config.json
|
||||
# └── finance
|
||||
# ├── dev
|
||||
# │ └── config.json
|
||||
# └── prod
|
||||
# └── config.json
|
||||
#
|
||||
# The discovered files (e.g. config.json) can contain any structured data supplied to the
|
||||
# generated application. e.g.:
|
||||
# {
|
||||
# "aws_account": "123456",
|
||||
# "asset_id": "11223344"
|
||||
# "cluster": {
|
||||
# "owner": "Jesse_Suen@intuit.com",
|
||||
# "name": "engineering-dev",
|
||||
# "address": "http://1.2.3.4"
|
||||
# }
|
||||
# }
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: "**/config.json"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: apps/guestbook
|
||||
destination:
|
||||
server: '{{cluster.address}}'
|
||||
namespace: guestbook
|
||||
@@ -1,68 +0,0 @@
|
||||
# This example demonstrates a git file generator which produces its items based on one or
|
||||
# more files referenced in a git repo. The referenced files would contain a json/yaml list of
|
||||
# arbitrary structured objects. Each item of the list would become a set of parameters to a
|
||||
# generated application.
|
||||
#
|
||||
# Suppose the following git directory structure:
|
||||
#
|
||||
# cluster-deployments
|
||||
# ├── apps
|
||||
# │ └── guestbook
|
||||
# │ ├── v1.0
|
||||
# │ │ └── install.yaml
|
||||
# │ └── v2.0
|
||||
# │ └── install.yaml
|
||||
# └── config
|
||||
# └── clusters.json
|
||||
#
|
||||
# In this example, the `clusters.json` file is json list of structured data:
|
||||
# [
|
||||
# {
|
||||
# "account": "123456",
|
||||
# "asset_id": "11223344",
|
||||
# "cluster": {
|
||||
# "owner": "Jesse_Suen@intuit.com",
|
||||
# "name": "engineering-dev",
|
||||
# "address": "http://1.2.3.4"
|
||||
# },
|
||||
# "appVersions": {
|
||||
# "prometheus-operator": "v0.38",
|
||||
# "guestbook": "v2.0"
|
||||
# }
|
||||
# },
|
||||
# {
|
||||
# "account": "456789",
|
||||
# "asset_id": "55667788",
|
||||
# "cluster": {
|
||||
# "owner": "Alexander_Matyushentsev@intuit.com",
|
||||
# "name": "engineering-prod",
|
||||
# "address": "http://2.4.6.8"
|
||||
# },
|
||||
# "appVersions": {
|
||||
# "prometheus-operator": "v0.38",
|
||||
# "guestbook": "v1.0"
|
||||
# }
|
||||
# }
|
||||
# ]
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: config/clusters.json
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: apps/guestbook/{{appVersions.guestbook}}
|
||||
destination:
|
||||
server: http://kubernetes.default.svc
|
||||
namespace: guestbook
|
||||
@@ -1,33 +0,0 @@
|
||||
# The list generator specifies a literal list of argument values to the app spec template.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
values:
|
||||
project: dev
|
||||
- cluster: engineering-prod
|
||||
url: https://2.4.6.8
|
||||
values:
|
||||
project: prod
|
||||
- cluster: finance-preprod
|
||||
url: https://9.8.7.6
|
||||
values:
|
||||
project: preprod
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,3 +0,0 @@
|
||||
# Proposal Examples
This directory contains examples that are not yet implemented.
They are part of the project to indicate future progress, and we welcome any contribution that adds an implementation.
|
||||
@@ -1,48 +0,0 @@
|
||||
# For all generators, filters can be applied to reduce the generated items to a smaller subset.
|
||||
# A powerful set of filter expressions is supported using syntax provided by the
# https://github.com/antonmedv/expr library. Example expressions are demonstrated below.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
# Match all clusters who meet ALL of the following conditions:
|
||||
# 1. name matches the regex `sales-.*`
|
||||
# 2. environment label is either 'staging' or 'prod'
|
||||
- clusters:
|
||||
filters:
|
||||
- expr: '{{name}} matches "sales-.*"'
|
||||
- expr: '{{metadata.labels.environment}} in [staging, prod]'
|
||||
values:
|
||||
version: '2.0.0'
|
||||
# Filter items from `config/clusters.json` in the `cluster-deployments` git repo,
|
||||
# to only those having the `cluster.enabled == true` property. e.g.:
|
||||
# {
|
||||
# ...
|
||||
# "cluster": {
|
||||
# "enabled": true,
|
||||
# ...
|
||||
# }
|
||||
# }
|
||||
- git:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
files:
|
||||
- path: config/clusters.json
|
||||
filters:
|
||||
- expr: '{{cluster.enabled}} == true'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: "{{values.version}}"
|
||||
chart: guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: foo
|
||||
value: "{{metadata.annotations.foo}}"
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
@@ -1,48 +0,0 @@
|
||||
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
|
||||
# useful to do this in order to override the spec.template stanza, and when simple string
# parameterization is insufficient. In the examples below, the generators[].XXX.template is
# a partial definition, which overrides/patches the default template.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "project"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
path: '{{cluster}}-override'
|
||||
destination: {}
|
||||
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-prod
|
||||
url: https://1.2.3.4
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "project2"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
path: '{{cluster}}-override2'
|
||||
destination: {}
|
||||
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: "project"
|
||||
source:
|
||||
repoURL: https://github.com/infra-team/cluster-deployments.git
|
||||
targetRevision: HEAD
|
||||
path: guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,6 +0,0 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-prometheus-operator
|
||||
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,4 +0,0 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 40.5.0
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -1 +0,0 @@
|
||||
# Blank values.yaml
|
||||
@@ -1,6 +0,0 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,23 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-guestbook
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "helm-guestbook.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of by running 'kubectl get svc -w {{ template "helm-guestbook.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "helm-guestbook.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "helm-guestbook.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "helm-guestbook.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "helm-guestbook.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "helm-guestbook.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -1,52 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "helm-guestbook.fullname" . }}
  labels:
    app: {{ template "helm-guestbook.name" . }}
    chart: {{ template "helm-guestbook.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: {{ template "helm-guestbook.name" . }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "helm-guestbook.name" . }}
        release: {{ .Release.Name }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
{{ toYaml .Values.resources | indent 12 }}
    {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
    {{- end }}
@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "helm-guestbook.fullname" . }}
  labels:
    app: {{ template "helm-guestbook.name" . }}
    chart: {{ template "helm-guestbook.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "helm-guestbook.name" . }}
    release: {{ .Release.Name }}
@@ -1,2 +0,0 @@
service:
  type: LoadBalancer
@@ -1,45 +0,0 @@
# Default values for helm-guestbook.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: gcr.io/heptio-images/ks-guestbook-demo
  tag: 0.1
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #  cpu: 100m
  #  memory: 128Mi
  # requests:
  #  cpu: 100m
  #  memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator

type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
  version: 40.5.0
  repository: https://prometheus-community.github.io/helm-charts
@@ -1 +0,0 @@
# Blank values.yaml
@@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: cluster-addons
  namespace: argocd
spec:
  generators:
  - git:
      repoURL: https://github.com/argoproj/argo-cd.git
      revision: HEAD
      directories:
      - path: applicationset/examples/git-generator-directory/excludes/cluster-addons/*
      - exclude: true
        path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook
  template:
    metadata:
      name: '{{path.basename}}'
    spec:
      project: "my-project"
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: https://kubernetes.default.svc
        namespace: '{{path.basename}}'
      syncPolicy:
        syncOptions:
        - CreateNamespace=true
@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: cluster-addons
  namespace: argocd
spec:
  generators:
  - git:
      repoURL: https://github.com/argoproj/argo-cd.git
      revision: HEAD
      directories:
      - path: applicationset/examples/git-generator-directory/cluster-addons/*
  template:
    metadata:
      name: '{{path.basename}}'
    spec:
      project: "my-project"
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: https://kubernetes.default.svc
        namespace: '{{path.basename}}'
      syncPolicy:
        syncOptions:
        - CreateNamespace=true
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,7 +0,0 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"aws_account": "123456",
|
||||
"asset_id": "11223344",
|
||||
"cluster": {
|
||||
"owner": "cluster-admin@company.com",
|
||||
"name": "engineering-dev",
|
||||
"address": "http://1.2.3.4"
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"aws_account": "123456",
|
||||
"asset_id": "11223344",
|
||||
"cluster": {
|
||||
"owner": "cluster-admin@company.com",
|
||||
"name": "engineering-prod",
|
||||
"address": "http://1.2.3.4"
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: "applicationset/examples/git-generator-files-discovery/apps/guestbook"
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
#server: '{{cluster.address}}'
|
||||
namespace: guestbook
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/list-generator/guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,6 +0,0 @@
|
||||
#namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- namespace-install.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,417 +0,0 @@
|
||||
# This is an auto-generated file. DO NOT EDIT
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterworkflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: ClusterWorkflowTemplate
|
||||
listKind: ClusterWorkflowTemplateList
|
||||
plural: clusterworkflowtemplates
|
||||
shortNames:
|
||||
- clusterwftmpl
|
||||
- cwft
|
||||
singular: clusterworkflowtemplate
|
||||
scope: Cluster
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: cronworkflows.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: CronWorkflow
|
||||
listKind: CronWorkflowList
|
||||
plural: cronworkflows
|
||||
shortNames:
|
||||
- cwf
|
||||
- cronwf
|
||||
singular: cronworkflow
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workfloweventbindings.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowEventBinding
|
||||
listKind: WorkflowEventBindingList
|
||||
plural: workfloweventbindings
|
||||
shortNames:
|
||||
- wfeb
|
||||
singular: workfloweventbinding
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflows.argoproj.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .status.phase
|
||||
description: Status of the workflow
|
||||
name: Status
|
||||
type: string
|
||||
- JSONPath: .status.startedAt
|
||||
description: When the workflow was started
|
||||
format: date-time
|
||||
name: Age
|
||||
type: date
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: Workflow
|
||||
listKind: WorkflowList
|
||||
plural: workflows
|
||||
shortNames:
|
||||
- wf
|
||||
singular: workflow
|
||||
scope: Namespaced
|
||||
subresources: {}
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: workflowtemplates.argoproj.io
|
||||
spec:
|
||||
group: argoproj.io
|
||||
names:
|
||||
kind: WorkflowTemplate
|
||||
listKind: WorkflowTemplateList
|
||||
plural: workflowtemplates
|
||||
shortNames:
|
||||
- wftmpl
|
||||
singular: workflowtemplate
|
||||
scope: Namespaced
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- create
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflowtemplates
|
||||
- workflowtemplates/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: argo-server-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- pods/exec
|
||||
- pods/log
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- watch
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- workflows
|
||||
- workfloweventbindings
|
||||
- workflowtemplates
|
||||
- cronworkflows
|
||||
- cronworkflows/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: argo-server-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: argo-server-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: workflow-controller-configmap
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 2746
|
||||
targetPort: 2746
|
||||
selector:
|
||||
app: argo-server
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: workflow-controller-metrics
|
||||
spec:
|
||||
ports:
|
||||
- name: metrics
|
||||
port: 9090
|
||||
protocol: TCP
|
||||
targetPort: 9090
|
||||
selector:
|
||||
app: workflow-controller
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argo-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: argo-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: argo-server
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- server
|
||||
- --namespaced
|
||||
image: argoproj/argocli:v2.12.5
|
||||
name: argo-server
|
||||
ports:
|
||||
- containerPort: 2746
|
||||
name: web
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 2746
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 20
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo-server
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: workflow-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: workflow-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: workflow-controller
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --configmap
|
||||
- workflow-controller-configmap
|
||||
- --executor-image
|
||||
- argoproj/argoexec:v2.12.5
|
||||
- --namespaced
|
||||
command:
|
||||
- workflow-controller
|
||||
image: argoproj/workflow-controller:v2.12.5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 30
|
||||
name: workflow-controller
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: metrics
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
serviceAccountName: argo
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: helm-prometheus-operator
|
||||
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
appVersion: "1.0"
|
||||
@@ -1,4 +0,0 @@
|
||||
dependencies:
|
||||
- name: kube-prometheus-stack
|
||||
version: 9.4.10
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
@@ -1 +0,0 @@
|
||||
# Blank values.yaml
|
||||
@@ -1,33 +0,0 @@
|
||||
# This example demonstrates the combining of the git generator with a cluster generator
|
||||
# The expected output would be an application per git directory and a cluster (application_count = git directory * clusters)
|
||||
#
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-git
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/matrix/cluster-addons/*
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}-{{name}}'
|
||||
spec:
|
||||
project: '{{metadata.labels.environment}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,39 +0,0 @@
|
||||
# This example demonstrates the combining of the git generator with a list generator
|
||||
# The expected output would be an application per git directory and a list entry (application_count = git directory * list entries)
|
||||
#
|
||||
#
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: list-git
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
revision: HEAD
|
||||
directories:
|
||||
- path: applicationset/examples/matrix/cluster-addons/*
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
values:
|
||||
project: dev
|
||||
- cluster: engineering-prod
|
||||
url: https://2.4.6.8
|
||||
values:
|
||||
project: prod
|
||||
template:
|
||||
metadata:
|
||||
name: '{{path.basename}}-{{cluster}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,37 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: list-and-list
|
||||
namespace: argocd
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- list:
|
||||
elements:
|
||||
- values:
|
||||
suffix: '1'
|
||||
- values:
|
||||
suffix: '2'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-{{values.suffix}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,67 +0,0 @@
|
||||
# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union
|
||||
# generators cannot contain further-nested matrix or union generators.
|
||||
#
|
||||
# The generators are evaluated from most-nested to least-nested. In this case:
|
||||
# 1. The union generator joins two lists to make 3 parameter sets.
|
||||
# 2. The inner matrix generator takes the cartesian product of the two lists to make 4 parameters sets.
|
||||
# 3. The outer matrix generator takes the cartesian product of the 3 union and the 4 inner matrix parameter sets to
|
||||
# make 3*4=12 final parameter sets.
|
||||
# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: matrix-and-union-in-matrix
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- union:
|
||||
mergeKeys:
|
||||
- cluster
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- cluster: engineering-test
|
||||
url: https://kubernetes.default.svc
|
||||
values:
|
||||
project: default
|
||||
- matrix:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- values:
|
||||
suffix: '1'
|
||||
- values:
|
||||
suffix: '2'
|
||||
- list:
|
||||
elements:
|
||||
- values:
|
||||
prefix: 'first'
|
||||
- values:
|
||||
prefix: 'second'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{values.prefix}}-{{cluster}}-{{values.suffix}}'
|
||||
spec:
|
||||
project: '{{values.project}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: '{{path.basename}}'
|
||||
@@ -1,44 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: merge-clusters-and-list
|
||||
spec:
|
||||
generators:
|
||||
- merge:
|
||||
mergeKeys:
|
||||
- server
|
||||
generators:
|
||||
- clusters:
|
||||
values:
|
||||
kafka: 'true'
|
||||
redis: 'false'
|
||||
# For clusters with a specific label, enable Kafka.
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
use-kafka: 'false'
|
||||
values:
|
||||
kafka: 'false'
|
||||
# For a specific cluster, enable Redis.
|
||||
- list:
|
||||
elements:
|
||||
- server: https://some-specific-cluster
|
||||
values.redis: 'true'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: HEAD
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: kafka
|
||||
value: '{{values.kafka}}'
|
||||
- name: redis
|
||||
value: '{{values.redis}}'
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: default
|
||||
@@ -1,43 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: merge-two-matrixes
|
||||
spec:
|
||||
generators:
|
||||
- merge:
|
||||
mergeKeys:
|
||||
- server
|
||||
- environment
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
- clusters:
|
||||
values:
|
||||
replicaCount: '2'
|
||||
- list:
|
||||
elements:
|
||||
- environment: staging
|
||||
namespace: guestbook-non-prod
|
||||
- environment: prod
|
||||
namespace: guestbook
|
||||
- list:
|
||||
elements:
|
||||
- server: https://kubernetes.default.svc
|
||||
environment: staging
|
||||
values.replicaCount: '1'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-guestbook-{{environment}}'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: HEAD
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: replicaCount
|
||||
value: '{{values.replicaCount}}'
|
||||
destination:
|
||||
server: '{{server}}'
|
||||
namespace: '{{namespace}}'
|
||||
@@ -1,40 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: myapp
|
||||
spec:
|
||||
generators:
|
||||
- pullRequest:
|
||||
github:
|
||||
# The GitHub organization or user.
|
||||
owner: myorg
|
||||
# The Github repository
|
||||
repo: myrepo
|
||||
# For GitHub Enterprise. (optional)
|
||||
api: https://git.example.com/
|
||||
# Reference to a Secret containing an access token. (optional)
|
||||
tokenRef:
|
||||
secretName: github-token
|
||||
key: token
|
||||
# Labels is used to filter the PRs that you want to target. (optional)
|
||||
labels:
|
||||
- preview
|
||||
template:
|
||||
metadata:
|
||||
name: 'myapp-{{ branch }}-{{ number }}'
|
||||
spec:
|
||||
source:
|
||||
repoURL: 'https://github.com/myorg/myrepo.git'
|
||||
targetRevision: '{{ head_sha }}'
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
parameters:
|
||||
- name: "image.tag"
|
||||
value: "pull-{{ head_sha }}"
|
||||
project: default
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: "{{ branch }}-{{ number }}"
|
||||
syncPolicy:
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
@@ -1,24 +0,0 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- scmProvider:
|
||||
github:
|
||||
organization: argoproj
|
||||
cloneProtocol: https
|
||||
filters:
|
||||
- repositoryMatch: example-apps
|
||||
template:
|
||||
metadata:
|
||||
name: '{{ repository }}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: '{{ url }}'
|
||||
targetRevision: '{{ branch }}'
|
||||
path: guestbook
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: guestbook
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,7 +0,0 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,20 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@@ -1,10 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: guestbook-ui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: guestbook-ui
|
||||
@@ -1,7 +0,0 @@
|
||||
namePrefix: kustomize-
|
||||
|
||||
resources:
|
||||
- guestbook-ui-deployment.yaml
|
||||
- guestbook-ui-svc.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
@@ -1,36 +0,0 @@
|
||||
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
|
||||
# useful to do this in order to override the spec.template stanza, and when simple string
|
||||
# parameterization are insufficient. In the below examples, the generators[].XXX.template is
|
||||
# a partial definition, which overrides/patch the default template.
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
template:
|
||||
metadata: {}
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
targetRevision: HEAD
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
path: 'applicationset/examples/template-override/{{cluster}}-override'
|
||||
destination: {}
|
||||
|
||||
template:
|
||||
metadata:
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/template-override/default
|
||||
destination:
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
@@ -1,186 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
|
||||
ArgoCDSecretTypeCluster = "cluster"
|
||||
)
|
||||
|
||||
var _ Generator = (*ClusterGenerator)(nil)
|
||||
|
||||
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type ClusterGenerator struct {
|
||||
client.Client
|
||||
ctx context.Context
|
||||
clientset kubernetes.Interface
|
||||
// namespace is the Argo CD namespace
|
||||
namespace string
|
||||
settingsManager *settings.SettingsManager
|
||||
}
|
||||
|
||||
func NewClusterGenerator(c client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator {
|
||||
|
||||
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
|
||||
|
||||
g := &ClusterGenerator{
|
||||
Client: c,
|
||||
ctx: ctx,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
settingsManager: settingsManager,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// GetRequeueAfter never requeue the cluster generator because the `clusterSecretEventHandler` will requeue the appsets
|
||||
// when the cluster secrets change
|
||||
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) *argoappsetv1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Clusters.Template
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GenerateParams(
|
||||
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, _ *argoappsetv1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.Clusters == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// Do not include the local cluster in the cluster parameters IF there is a non-empty selector
|
||||
// - Since local clusters do not have secrets, they do not have labels to match against
|
||||
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
clusterSecrets, err := g.getSecretsByClusterName(appSetGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
|
||||
for _, cluster := range clustersFromArgoCD.Items {
|
||||
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
|
||||
secretsFound = append(secretsFound, secretForCluster)
|
||||
|
||||
} else if !ignoreLocalClusters {
|
||||
// If there is no secret for the cluster, it's the local cluster, so handle it here.
|
||||
params := map[string]string{}
|
||||
params["name"] = cluster.Name
|
||||
params["server"] = cluster.Server
|
||||
|
||||
for key, value := range appSetGenerator.Clusters.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
|
||||
log.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
params := map[string]string{}
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = sanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
for key, value := range cluster.ObjectMeta.Annotations {
|
||||
params[fmt.Sprintf("metadata.annotations.%s", key)] = value
|
||||
}
|
||||
for key, value := range cluster.ObjectMeta.Labels {
|
||||
params[fmt.Sprintf("metadata.labels.%s", key)] = value
|
||||
}
|
||||
for key, value := range appSetGenerator.Clusters.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
log.WithField("cluster", cluster.Name).Info("matched cluster secret")
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
|
||||
// List all Clusters:
|
||||
clusterSecretList := &corev1.SecretList{}
|
||||
|
||||
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
|
||||
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := g.Client.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debug("clusters matching labels", "count", len(clusterSecretList.Items))
|
||||
|
||||
res := map[string]corev1.Secret{}
|
||||
|
||||
for _, cluster := range clusterSecretList.Items {
|
||||
clusterName := string(cluster.Data["name"])
|
||||
|
||||
res[clusterName] = cluster
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
// sanitize the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func sanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
|
||||
@@ -1,254 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"testing"
|
||||
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type possiblyErroringFakeCtrlRuntimeClient struct {
|
||||
client.Client
|
||||
shouldError bool
|
||||
}
|
||||
|
||||
func (p *possiblyErroringFakeCtrlRuntimeClient) List(ctx context.Context, secretList client.ObjectList, opts ...client.ListOption) error {
|
||||
if p.shouldError {
|
||||
return fmt.Errorf("could not list Secrets")
|
||||
}
|
||||
return p.Client.List(ctx, secretList, opts...)
|
||||
}
|
||||
|
||||
func TestGenerateParams(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "staging-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "staging",
|
||||
"org": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "staging",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("staging-01"),
|
||||
"server": []byte("https://staging-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "production-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "production",
|
||||
"org": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "production",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("production_01/west"),
|
||||
"server": []byte("https://production-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
selector metav1.LabelSelector
|
||||
values map[string]string
|
||||
expected []map[string]string
|
||||
// clientError is true if a k8s client error should be simulated
|
||||
clientError bool
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "no label selector",
|
||||
selector: metav1.LabelSelector{},
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
|
||||
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
|
||||
{"name": "in-cluster", "server": "https://kubernetes.default.svc"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "secret type label selector",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
},
|
||||
},
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
|
||||
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"environment": "production",
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production or staging",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"production",
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production or staging with match labels",
|
||||
selector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"production",
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
MatchLabels: map[string]string{
|
||||
"org": "foo",
|
||||
},
|
||||
},
|
||||
values: map[string]string{
|
||||
"name": "baz",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
|
||||
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
|
||||
},
|
||||
clientError: false,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "simulate client error",
|
||||
selector: metav1.LabelSelector{},
|
||||
values: nil,
|
||||
expected: nil,
|
||||
clientError: true,
|
||||
expectedError: fmt.Errorf("could not list Secrets"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace")
|
||||
|
||||
got, err := clusterGenerator.GenerateParams(&argoappsetv1alpha1.ApplicationSetGenerator{
|
||||
Clusters: &argoappsetv1alpha1.ClusterGenerator{
|
||||
Selector: testCase.selector,
|
||||
Values: testCase.values,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeClusterName(t *testing.T) {
|
||||
t.Run("valid DNS-1123 subdomain name", func(t *testing.T) {
|
||||
assert.Equal(t, "cluster-name", sanitizeName("cluster-name"))
|
||||
})
|
||||
t.Run("invalid DNS-1123 subdomain name", func(t *testing.T) {
|
||||
invalidName := "-.--CLUSTER/name -./.-"
|
||||
assert.Equal(t, "cluster-name", sanitizeName(invalidName))
|
||||
})
|
||||
}
|
||||
@@ -1,229 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*DuckTypeGenerator)(nil)
|
||||
|
||||
// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type DuckTypeGenerator struct {
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
settingsManager *settings.SettingsManager
|
||||
}
|
||||
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
|
||||
|
||||
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
|
||||
|
||||
g := &DuckTypeGenerator{
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
settingsManager: settingsManager,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
|
||||
// Return a requeue default of 3 minutes, if no override is specified.
|
||||
|
||||
if appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.ClusterDecisionResource.Template
|
||||
}
|
||||
|
||||
func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// Not likely to happen
|
||||
if appSetGenerator.ClusterDecisionResource == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Read the configMapRef
|
||||
cm, err := g.clientset.CoreV1().ConfigMaps(g.namespace).Get(g.ctx, appSetGenerator.ClusterDecisionResource.ConfigMapRef, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Extract GVK data for the dynamic client to use
|
||||
versionIdx := strings.Index(cm.Data["apiVersion"], "/")
|
||||
kind := cm.Data["kind"]
|
||||
resourceName := appSetGenerator.ClusterDecisionResource.Name
|
||||
labelSelector := appSetGenerator.ClusterDecisionResource.LabelSelector
|
||||
|
||||
log.WithField("kind.apiVersion", kind+"."+cm.Data["apiVersion"]).Info("Kind.Group/Version Reference")
|
||||
|
||||
// Validate the fields
|
||||
if kind == "" || versionIdx < 1 {
|
||||
log.Warningf("kind=%v, resourceName=%v, versionIdx=%v", kind, resourceName, versionIdx)
|
||||
return nil, fmt.Errorf("There is a problem with the apiVersion, kind or resourceName provided")
|
||||
}
|
||||
|
||||
if (resourceName == "" && labelSelector.MatchLabels == nil && labelSelector.MatchExpressions == nil) ||
|
||||
(resourceName != "" && (labelSelector.MatchExpressions != nil || labelSelector.MatchLabels != nil)) {
|
||||
|
||||
log.Warningf("You must choose either resourceName=%v, labelSelector.matchLabels=%v or labelSelect.matchExpressions=%v", resourceName, labelSelector.MatchLabels, labelSelector.MatchExpressions)
|
||||
return nil, fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator")
|
||||
}
|
||||
|
||||
// Split up the apiVersion
|
||||
group := cm.Data["apiVersion"][0:versionIdx]
|
||||
version := cm.Data["apiVersion"][versionIdx+1:]
|
||||
log.WithField("kind.group.version", kind+"."+group+"/"+version).Debug("decoded Ref")
|
||||
|
||||
duckGVR := schema.GroupVersionResource{Group: group, Version: version, Resource: kind}
|
||||
|
||||
listOptions := metav1.ListOptions{}
|
||||
if resourceName == "" {
|
||||
listOptions.LabelSelector = metav1.FormatLabelSelector(&labelSelector)
|
||||
log.WithField("listOptions.LabelSelector", listOptions.LabelSelector).Info("selection type")
|
||||
} else {
|
||||
listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", resourceName).String()
|
||||
//metav1.Convert_fields_Selector_To_string(fields.).Sprintf("metadata.name=%s", resourceName)
|
||||
log.WithField("listOptions.FieldSelector", listOptions.FieldSelector).Info("selection type")
|
||||
}
|
||||
|
||||
duckResources, err := g.dynClient.Resource(duckGVR).Namespace(g.namespace).List(g.ctx, listOptions)
|
||||
|
||||
if err != nil {
|
||||
log.WithField("GVK", duckGVR).Warning("resources were not found")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(duckResources.Items) == 0 {
|
||||
log.Warning("no resource found, make sure you clusterDecisionResource is defined correctly")
|
||||
return nil, fmt.Errorf("no clusterDecisionResources found")
|
||||
}
|
||||
|
||||
// Override the duck type in the status of the resource
|
||||
statusListKey := "clusters"
|
||||
|
||||
matchKey := cm.Data["matchKey"]
|
||||
|
||||
if cm.Data["statusListKey"] != "" {
|
||||
statusListKey = cm.Data["statusListKey"]
|
||||
}
|
||||
if matchKey == "" {
|
||||
log.WithField("matchKey", matchKey).Warning("matchKey not found in " + cm.Name)
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
clusterDecisions := []interface{}{}
|
||||
|
||||
// Build the decision slice
|
||||
for _, duckResource := range duckResources.Items {
|
||||
log.WithField("duckResourceName", duckResource.GetName()).Debug("found resource")
|
||||
|
||||
if duckResource.Object["status"] == nil || len(duckResource.Object["status"].(map[string]interface{})) == 0 {
|
||||
log.Warningf("clusterDecisionResource: %s, has no status", duckResource.GetName())
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("duckResourceStatus", duckResource.Object["status"]).Debug("found resource")
|
||||
|
||||
clusterDecisions = append(clusterDecisions, duckResource.Object["status"].(map[string]interface{})[statusListKey].([]interface{})...)
|
||||
|
||||
}
|
||||
log.Infof("Number of decisions found: %v", len(clusterDecisions))
|
||||
|
||||
// Read this outside the loop to improve performance
|
||||
argoClusters := clustersFromArgoCD.Items
|
||||
|
||||
if len(clusterDecisions) > 0 {
|
||||
for _, cluster := range clusterDecisions {
|
||||
|
||||
// generated instance of cluster params
|
||||
params := map[string]string{}
|
||||
|
||||
log.Infof("cluster: %v", cluster)
|
||||
matchValue := cluster.(map[string]interface{})[matchKey]
|
||||
if matchValue == nil || matchValue.(string) == "" {
|
||||
log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]interface{}))
|
||||
continue
|
||||
}
|
||||
|
||||
strMatchValue := matchValue.(string)
|
||||
log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD")
|
||||
|
||||
found := false
|
||||
|
||||
for _, argoCluster := range argoClusters {
|
||||
if argoCluster.Name == strMatchValue {
|
||||
|
||||
log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD")
|
||||
params["name"] = argoCluster.Name
|
||||
params["server"] = argoCluster.Server
|
||||
|
||||
found = true
|
||||
break // Stop looking
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if !found {
|
||||
log.WithField(matchKey, strMatchValue).Warning("unmatched cluster in ArgoCD")
|
||||
continue
|
||||
}
|
||||
|
||||
for key, value := range cluster.(map[string]interface{}) {
|
||||
params[key] = value.(string)
|
||||
}
|
||||
|
||||
for key, value := range appSetGenerator.ClusterDecisionResource.Values {
|
||||
params[fmt.Sprintf("values.%s", key)] = value
|
||||
}
|
||||
|
||||
res = append(res, params)
|
||||
}
|
||||
} else {
|
||||
log.Warningf("clusterDecisionResource status." + statusListKey + " missing")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
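The generator above derives the GroupVersionResource for the dynamic client from two ConfigMap entries (apiVersion and kind). The following is a minimal standalone sketch of just that split, not part of the generator itself; the "mallard.io/v1"/"ducks" values mirror the test file below and are otherwise arbitrary.

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	data := map[string]string{"apiVersion": "mallard.io/v1", "kind": "ducks"}

	// Same validation the generator performs: a kind must be given and the
	// apiVersion must contain a "/" separating group and version.
	versionIdx := strings.Index(data["apiVersion"], "/")
	if data["kind"] == "" || versionIdx < 1 {
		fmt.Println("there is a problem with the apiVersion or kind provided")
		return
	}

	duckGVR := schema.GroupVersionResource{
		Group:    data["apiVersion"][:versionIdx],   // "mallard.io"
		Version:  data["apiVersion"][versionIdx+1:], // "v1"
		Resource: data["kind"],                      // "ducks"
	}
	fmt.Printf("%+v\n", duckGVR)
}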
@@ -1,315 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
const resourceApiVersion = "mallard.io/v1"
|
||||
const resourceKind = "ducks"
|
||||
const resourceName = "quak"
|
||||
|
||||
func TestGenerateParamsForDuckType(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "staging-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "staging",
|
||||
"org": "foo",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "staging",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("staging-01"),
|
||||
"server": []byte("https://staging-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "production-01",
|
||||
Namespace: "namespace",
|
||||
Labels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"environment": "production",
|
||||
"org": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo.argoproj.io": "production",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"config": []byte("{}"),
|
||||
"name": []byte("production-01"),
|
||||
"server": []byte("https://production-01.example.com"),
|
||||
},
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
|
||||
duckType := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "all-species"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "staging-01",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
duckTypeProdOnly := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "spotted"},
|
||||
},
|
||||
"status": map[string]interface{}{
|
||||
"decisions": []interface{}{
|
||||
map[string]interface{}{
|
||||
"clusterName": "production-01",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
duckTypeEmpty := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": "Duck",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": resourceName,
|
||||
"namespace": "namespace",
|
||||
"labels": map[string]interface{}{"duck": "canvasback"},
|
||||
},
|
||||
"status": map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
configMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-configmap",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"apiVersion": resourceApiVersion,
|
||||
"kind": resourceKind,
|
||||
"statusListKey": "decisions",
|
||||
"matchKey": "clusterName",
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
configMapRef string
|
||||
resourceName string
|
||||
labelSelector metav1.LabelSelector
|
||||
resource *unstructured.Unstructured
|
||||
values map[string]string
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "no duck resource",
|
||||
resourceName: "",
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
|
||||
},
|
||||
/*** This does not work with the FAKE runtime client, fieldSelectors are broken.
|
||||
{
|
||||
name: "invalid name for duck resource",
|
||||
resourceName: resourceName + "-different",
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("duck.mallard.io \"quak\" not found"),
|
||||
},
|
||||
***/
|
||||
{
|
||||
name: "duck type generator resourceName",
|
||||
resourceName: resourceName,
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only",
|
||||
resourceName: resourceName,
|
||||
resource: duckTypeProdOnly,
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type empty status",
|
||||
resourceName: resourceName,
|
||||
resource: duckTypeEmpty,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type empty status labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "canvasback"}},
|
||||
resource: duckTypeEmpty,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "all-species"}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "production-only labelSelector.matchLabels",
|
||||
resourceName: "",
|
||||
resource: duckTypeProdOnly,
|
||||
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "spotted"}},
|
||||
values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator labelSelector.matchExpressions",
|
||||
resourceName: "",
|
||||
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "duck",
|
||||
Operator: "In",
|
||||
Values: []string{"all-species", "marbled"},
|
||||
},
|
||||
}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: []map[string]string{
|
||||
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
|
||||
|
||||
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "duck type generator resourceName and labelSelector.matchExpressions",
|
||||
resourceName: resourceName,
|
||||
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "duck",
|
||||
Operator: "In",
|
||||
Values: []string{"all-species", "marbled"},
|
||||
},
|
||||
}},
|
||||
resource: duckType,
|
||||
values: nil,
|
||||
expected: nil,
|
||||
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(append(runtimeClusters, configMap)...)
|
||||
|
||||
gvrToListKind := map[schema.GroupVersionResource]string{{
|
||||
Group: "mallard.io",
|
||||
Version: "v1",
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
var duckTypeGenerator = NewDuckTypeGenerator(context.Background(), fakeDynClient, appClientset, "namespace")
|
||||
|
||||
got, err := duckTypeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
ClusterDecisionResource: &argoprojiov1alpha1.DuckTypeGenerator{
|
||||
ConfigMapRef: "my-configmap",
|
||||
Name: testCase.resourceName,
|
||||
LabelSelector: testCase.labelSelector,
|
||||
Values: testCase.values,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
assert.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
type TransformResult struct {
|
||||
Params []map[string]string
|
||||
Template argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
// Transform converts a requested spec generator into a list of paramSets and a merged template
|
||||
func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet) ([]TransformResult, error) {
|
||||
res := []TransformResult{}
|
||||
var firstError error
|
||||
|
||||
generators := GetRelevantGenerators(&requestedGenerator, allGenerators)
|
||||
for _, g := range generators {
|
||||
// we call mergeGeneratorTemplate first because GenerateParams might be more costly so we want to fail fast if there is an error
|
||||
mergedTemplate, err := mergeGeneratorTemplate(g, &requestedGenerator, baseTemplate)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Error("error generating params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
params, err := g.GenerateParams(&requestedGenerator, appSet)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Error("error generating params")
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
res = append(res, TransformResult{
|
||||
Params: params,
|
||||
Template: mergedTemplate,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
return res, firstError
|
||||
|
||||
}
|
||||
|
||||
func GetRelevantGenerators(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, generators map[string]Generator) []Generator {
|
||||
var res []Generator
|
||||
|
||||
v := reflect.Indirect(reflect.ValueOf(requestedGenerator))
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if !field.CanInterface() {
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.ValueOf(field.Interface()).IsNil() {
|
||||
res = append(res, generators[v.Type().Field(i).Name])
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) (argoprojiov1alpha1.ApplicationSetTemplate, error) {
|
||||
|
||||
// Make a copy of the value from `GetTemplate()` before merge, rather than copying directly into
|
||||
// the provided parameter (which will touch the original resource object returned by client-go)
|
||||
dest := g.GetTemplate(requestedGenerator).DeepCopy()
|
||||
|
||||
err := mergo.Merge(dest, applicationSetTemplate)
|
||||
|
||||
return *dest, err
|
||||
}
|
||||
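mergeGeneratorTemplate above relies on mergo.Merge's default non-overwriting behaviour: fields already set on the generator-level template are kept, and only empty fields are filled in from the ApplicationSet-level base template. A small sketch of that precedence follows; it uses a plain stand-in struct rather than the real ApplicationSetTemplate type.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type template struct {
	Name    string
	Project string
}

func main() {
	generatorTemplate := template{Name: "{{cluster}}-guestbook"}  // template set on the generator
	baseTemplate := template{Name: "ignored", Project: "default"} // ApplicationSet spec.template

	dest := generatorTemplate // stand-in for GetTemplate(...).DeepCopy()
	if err := mergo.Merge(&dest, baseTemplate); err != nil {
		panic(err)
	}

	// The generator's Name wins; the empty Project is filled from the base template.
	// Output: {Name:{{cluster}}-guestbook Project:default}
	fmt.Printf("%+v\n", dest)
}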
@@ -1,227 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jeremywohl/flatten"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*GitGenerator)(nil)
|
||||
|
||||
type GitGenerator struct {
|
||||
repos services.Repos
|
||||
}
|
||||
|
||||
func NewGitGenerator(repos services.Repos) Generator {
|
||||
g := &GitGenerator{
|
||||
repos: repos,
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Git.Template
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
|
||||
// Return the configured requeue interval, or a default of 3 minutes if none is specified.
|
||||
|
||||
if appSetGenerator.Git.RequeueAfterSeconds != nil {
|
||||
return time.Duration(*appSetGenerator.Git.RequeueAfterSeconds) * time.Second
|
||||
}
|
||||
|
||||
return DefaultRequeueAfterSeconds
|
||||
}
|
||||
|
||||
func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.Git == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
var err error
|
||||
var res []map[string]string
|
||||
if appSetGenerator.Git.Directories != nil {
|
||||
res, err = g.generateParamsForGitDirectories(appSetGenerator)
|
||||
} else if appSetGenerator.Git.Files != nil {
|
||||
res, err = g.generateParamsForGitFiles(appSetGenerator)
|
||||
} else {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) ([]map[string]string, error) {
|
||||
|
||||
// Directories, not files
|
||||
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"allPaths": allPaths,
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
}).Info("applications result from the repo service")
|
||||
|
||||
requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths)
|
||||
|
||||
res := g.generateParamsFromApps(requestedApps, appSetGenerator)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) ([]map[string]string, error) {
|
||||
|
||||
// Get all files that match the requested path string, removing duplicates
|
||||
allFiles := make(map[string][]byte)
|
||||
for _, requestedPath := range appSetGenerator.Git.Files {
|
||||
files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for filePath, content := range files {
|
||||
allFiles[filePath] = content
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the unduplicated map into a list, and sort by path to ensure a deterministic
|
||||
// processing order in the subsequent step
|
||||
allPaths := []string{}
|
||||
for path := range allFiles {
|
||||
allPaths = append(allPaths, path)
|
||||
}
|
||||
sort.Strings(allPaths)
|
||||
|
||||
// Generate params from each path, and return
|
||||
res := []map[string]string{}
|
||||
for _, path := range allPaths {
|
||||
|
||||
// A JSON / YAML file path can contain multiple sets of parameters (i.e. it is an array)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
|
||||
}
|
||||
|
||||
for index := range paramsArray {
|
||||
res = append(res, paramsArray[index])
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte) ([]map[string]string, error) {
|
||||
objectsFound := []map[string]interface{}{}
|
||||
|
||||
// First, we attempt to parse as an array
|
||||
err := yaml.Unmarshal(fileContent, &objectsFound)
|
||||
if err != nil {
|
||||
// If unable to parse as an array, attempt to parse as a single object
|
||||
singleObj := make(map[string]interface{})
|
||||
err = yaml.Unmarshal(fileContent, &singleObj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse file: %v", err)
|
||||
}
|
||||
objectsFound = append(objectsFound, singleObj)
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
// Flatten all objects found, and return them
|
||||
for _, objectFound := range objectsFound {
|
||||
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params := map[string]string{}
|
||||
for k, v := range flat {
|
||||
params[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
params["path"] = path.Dir(filePath)
|
||||
params["path.basename"] = path.Base(params["path"])
|
||||
params["path.filename"] = path.Base(filePath)
|
||||
params["path.basenameNormalized"] = sanitizeName(path.Base(params["path"]))
|
||||
params["path.filenameNormalized"] = sanitizeName(path.Base(params["path.filename"]))
|
||||
for k, v := range strings.Split(params["path"], "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string {
|
||||
res := []string{}
|
||||
for _, appPath := range allPaths {
|
||||
appInclude := false
|
||||
appExclude := false
|
||||
// Iterate over each appPath and check whether the Directories items contain a requestedPath that matches the appPath
|
||||
for _, requestedPath := range Directories {
|
||||
match, err := path.Match(requestedPath.Path, appPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedPath", requestedPath).
|
||||
WithField("appPath", appPath).Error("error while matching appPath to requestedPath")
|
||||
continue
|
||||
}
|
||||
if match && !requestedPath.Exclude {
|
||||
appInclude = true
|
||||
}
|
||||
if match && requestedPath.Exclude {
|
||||
appExclude = true
|
||||
}
|
||||
}
|
||||
// Whenever a path matches a pattern with exclude: true, it won't be included, even if it also matches a different (non-exclude) path pattern
|
||||
if appInclude && !appExclude {
|
||||
res = append(res, appPath)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argoprojiov1alpha1.ApplicationSetGenerator) []map[string]string {
|
||||
// TODO: At some point, the applicationSetGenerator param should be used
|
||||
|
||||
res := make([]map[string]string, len(requestedApps))
|
||||
for i, a := range requestedApps {
|
||||
|
||||
params := make(map[string]string, 2)
|
||||
params["path"] = a
|
||||
params["path.basename"] = path.Base(a)
|
||||
params["path.basenameNormalized"] = sanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(params["path"], "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
res[i] = params
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
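filterApps above uses path.Match, whose "*" wildcard does not cross "/" separators, so a pattern like "*" only matches top-level directories and a deeper level needs "p1/*". The standalone sketch below just demonstrates that glob behaviour; the paths are illustrative.

package main

import (
	"fmt"
	"path"
)

func main() {
	for _, p := range []string{"app1", "p1/app4", "p1/p2/app3"} {
		topLevel, _ := path.Match("*", p)    // matches only paths with no "/"
		oneDeep, _ := path.Match("p1/*", p)  // matches exactly one level under p1
		fmt.Printf("%-12s matches \"*\": %-5v matches \"p1/*\": %v\n", p, topLevel, oneDeep)
	}
}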
@@ -1,498 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
// type clientSet struct {
|
||||
// RepoServerServiceClient apiclient.RepoServerServiceClient
|
||||
// }
|
||||
|
||||
// func (c *clientSet) NewRepoServerClient() (io.Closer, apiclient.RepoServerServiceClient, error) {
|
||||
// return io.NewCloser(func() error { return nil }), c.RepoServerServiceClient, nil
|
||||
// }
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func Test_generateParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, []map[string]string{
|
||||
{
|
||||
"foo.bar": "baz",
|
||||
"path": "path/dir",
|
||||
"path.basename": "dir",
|
||||
"path.filename": "file_name.yaml",
|
||||
"path.basenameNormalized": "dir",
|
||||
"path.filenameNormalized": "file-name.yaml",
|
||||
"path[0]": "path",
|
||||
"path[1]": "dir",
|
||||
},
|
||||
}, params)
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "path[0]": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path.basenameNormalized": "app2", "path[0]": "app2"},
|
||||
{"path": "app_3", "path.basename": "app_3", "path.basenameNormalized": "app-3", "path[0]": "app_3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"p1/app2",
|
||||
"p1/p2/app3",
|
||||
"p1/p2/p3/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "p1/app2", "path.basename": "app2", "path[0]": "p1", "path[1]": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p1/p2/app3", "path.basename": "app3", "path[0]": "p1", "path[1]": "p2", "path[2]": "app3", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths with Exclude",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*", Exclude: true}, {Path: "*"}, {Path: "*/*"}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"p1/app2",
|
||||
"p1/app3",
|
||||
"p2/app3",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path[0]": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path[1]": "app3", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Expecting same exclude behavior with different order",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}, {Path: "*/*"}, {Path: "p1/*", Exclude: true}},
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"p1/app2",
|
||||
"p1/app3",
|
||||
"p2/app3",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{
|
||||
{"path": "app1", "path.basename": "app1", "path[0]": "app1", "path.basenameNormalized": "app1"},
|
||||
{"path": "app2", "path.basename": "app2", "path[0]": "app2", "path.basenameNormalized": "app2"},
|
||||
{"path": "p2/app3", "path.basename": "app3", "path[0]": "p2", "path[1]": "app3", "path.basenameNormalized": "app3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles empty response from repo server",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{},
|
||||
repoError: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles error from repo server",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
repoApps: []string{},
|
||||
repoError: fmt.Errorf("error"),
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("error"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], nil)
|
||||
|
||||
if testCaseCopy.expectedError != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromFiles(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
// files is the list of paths/globs to match
|
||||
files []argoprojiov1alpha1.GitFileGeneratorItem
|
||||
// repoFileContents maps repo path to the literal contents of that path
|
||||
repoFileContents map[string][]byte
|
||||
// if repoPathsError is non-nil, the call to GetPaths(...) will return this error value
|
||||
repoPathsError error
|
||||
expected []map[string]string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow: create params from git files",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "production",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
},
|
||||
"key1": "val1",
|
||||
"key2": {
|
||||
"key2_1": "val2_1",
|
||||
"key2_2": {
|
||||
"key2_2_1": "val2_2_1"
|
||||
}
|
||||
},
|
||||
"key3": 123
|
||||
}`),
|
||||
"cluster-config/staging/config.json": []byte(`{
|
||||
"cluster": {
|
||||
"owner": "foo.bar@example.com",
|
||||
"name": "staging",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
}
|
||||
}`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"key3": "123",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "foo.bar@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/staging",
|
||||
"path.basename": "staging",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "staging",
|
||||
"path.basenameNormalized": "staging",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "handles error during getting repo paths",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{},
|
||||
repoPathsError: fmt.Errorf("paths error"),
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("paths error"),
|
||||
},
|
||||
{
|
||||
name: "test invalid JSON file returns error",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`invalid json file`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{},
|
||||
expectedError: fmt.Errorf("unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.json"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.json": []byte(`
|
||||
[
|
||||
{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "production",
|
||||
"address": "https://kubernetes.default.svc",
|
||||
"inner": {
|
||||
"one" : "two"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cluster": {
|
||||
"owner": "john.doe@example.com",
|
||||
"name": "staging",
|
||||
"address": "https://kubernetes.default.svc"
|
||||
}
|
||||
}
|
||||
]`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"cluster.inner.one": "two",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.json",
|
||||
"path.filenameNormalized": "config.json",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Test YAML flow",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`
|
||||
cluster:
|
||||
owner: john.doe@example.com
|
||||
name: production
|
||||
address: https://kubernetes.default.svc
|
||||
key1: val1
|
||||
key2:
|
||||
key2_1: val2_1
|
||||
key2_2:
|
||||
key2_2_1: val2_2_1
|
||||
`),
|
||||
"cluster-config/staging/config.yaml": []byte(`
|
||||
cluster:
|
||||
owner: foo.bar@example.com
|
||||
name: staging
|
||||
address: https://kubernetes.default.svc
|
||||
`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"key1": "val1",
|
||||
"key2.key2_1": "val2_1",
|
||||
"key2.key2_2.key2_2_1": "val2_2_1",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "foo.bar@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/staging",
|
||||
"path.basename": "staging",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "staging",
|
||||
"path.basenameNormalized": "staging",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "test YAML array",
|
||||
files: []argoprojiov1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`
|
||||
- cluster:
|
||||
owner: john.doe@example.com
|
||||
name: production
|
||||
address: https://kubernetes.default.svc
|
||||
inner:
|
||||
one: two
|
||||
- cluster:
|
||||
owner: john.doe@example.com
|
||||
name: staging
|
||||
address: https://kubernetes.default.svc`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]string{
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "production",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"cluster.inner.one": "two",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
{
|
||||
"cluster.owner": "john.doe@example.com",
|
||||
"cluster.name": "staging",
|
||||
"cluster.address": "https://kubernetes.default.svc",
|
||||
"path": "cluster-config/production",
|
||||
"path.basename": "production",
|
||||
"path[0]": "cluster-config",
|
||||
"path[1]": "production",
|
||||
"path.basenameNormalized": "production",
|
||||
"path.filename": "config.yaml",
|
||||
"path.filenameNormalized": "config.yaml",
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Files: testCaseCopy.files,
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], nil)
|
||||
|
||||
|
||||
if testCaseCopy.expectedError != nil {
|
||||
assert.EqualError(t, err, testCaseCopy.expectedError.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,32 +0,0 @@
package generators

import (
	"fmt"
	"time"

	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)

// Generator defines the interface implemented by all ApplicationSet generators.
type Generator interface {
	// GenerateParams interprets the ApplicationSet and generates all relevant parameters for the application template.
	// The expected / desired list of parameters is returned; it will then be rendered and reconciled
	// against the current state of the Applications in the cluster.
	GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error)

	// GetRequeueAfter lets the generator control when the next reconciliation loop should run.
	// If there is more than one generator, the minimum of their times is used.
	// If NoRequeueAfter is returned, it is ignored.
	GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration

	// GetTemplate returns the inline template from the spec if there is any, or an empty object otherwise
	GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate
}

var EmptyAppSetGeneratorError = fmt.Errorf("ApplicationSet is empty")
var NoRequeueAfter time.Duration

// DefaultRequeueAfterSeconds is used when no RequeueAfterSeconds is specified; it is the default time to wait before the next reconcile loop.
const (
	DefaultRequeueAfterSeconds = 3 * time.Minute
)
|
||||
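The GetRequeueAfter doc comment above says the minimum of the generators' times is used and that NoRequeueAfter is ignored. A hedged sketch of that selection logic follows; it is not the controller's actual code, just the rule the interface describes, using a local stand-in for the package's NoRequeueAfter zero value.

package main

import (
	"fmt"
	"time"
)

const noRequeueAfter time.Duration = 0 // stand-in for the package's NoRequeueAfter zero value

func minRequeueAfter(durations []time.Duration) time.Duration {
	res := noRequeueAfter
	for _, d := range durations {
		if d == noRequeueAfter {
			continue // a generator with no opinion is ignored
		}
		if res == noRequeueAfter || d < res {
			res = d
		}
	}
	return res
}

func main() {
	// The shortest non-zero interval wins: prints 30s.
	fmt.Println(minRequeueAfter([]time.Duration{noRequeueAfter, 3 * time.Minute, 30 * time.Second}))
}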
@@ -1,74 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*ListGenerator)(nil)
|
||||
|
||||
type ListGenerator struct {
|
||||
}
|
||||
|
||||
func NewListGenerator() Generator {
|
||||
g := &ListGenerator{}
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.List.Template
|
||||
}
|
||||
|
||||
func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if appSetGenerator.List == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
res := make([]map[string]string, len(appSetGenerator.List.Elements))
|
||||
|
||||
for i, tmpItem := range appSetGenerator.List.Elements {
|
||||
params := map[string]string{}
|
||||
var element map[string]interface{}
|
||||
err := json.Unmarshal(tmpItem.Raw, &element)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshling list element %v", err)
|
||||
}
|
||||
|
||||
for key, value := range element {
|
||||
if key == "values" {
|
||||
values, ok := (value).(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing values map")
|
||||
}
|
||||
for k, v := range values {
|
||||
value, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing value as string %v", err)
|
||||
}
|
||||
params[fmt.Sprintf("values.%s", k)] = value
|
||||
}
|
||||
} else {
|
||||
v, ok := value.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing value as string %v", err)
|
||||
}
|
||||
params[key] = v
|
||||
}
|
||||
}
|
||||
|
||||
res[i] = params
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
func TestGenerateListParams(t *testing.T) {
|
||||
testCases := []struct {
|
||||
elements []apiextensionsv1.JSON
|
||||
expected []map[string]string
|
||||
}{
|
||||
{
|
||||
elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url"}`)}},
|
||||
expected: []map[string]string{{"cluster": "cluster", "url": "url"}},
|
||||
}, {
|
||||
elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "cluster","url": "url","values":{"foo":"bar"}}`)}},
|
||||
expected: []map[string]string{{"cluster": "cluster", "url": "url", "values.foo": "bar"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
var listGenerator = NewListGenerator()
|
||||
|
||||
got, err := listGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: testCase.elements,
|
||||
}}, nil)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,158 +0,0 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*MatrixGenerator)(nil)
|
||||
|
||||
var (
|
||||
ErrMoreThanTwoGenerators      = fmt.Errorf("found more than two generators, Matrix supports only two")
|
||||
ErrLessThanTwoGenerators      = fmt.Errorf("found less than two generators, Matrix supports only two")
|
||||
ErrMoreThenOneInnerGenerators = fmt.Errorf("found more than one generator in matrix.Generators")
|
||||
)
|
||||
|
||||
type MatrixGenerator struct {
|
||||
// The inner generators supported by the matrix generator (cluster, git, list...)
|
||||
supportedGenerators map[string]Generator
|
||||
}
|
||||
|
||||
func NewMatrixGenerator(supportedGenerators map[string]Generator) Generator {
|
||||
m := &MatrixGenerator{
|
||||
supportedGenerators: supportedGenerators,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
|
||||
if appSetGenerator.Matrix == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Matrix.Generators) < 2 {
|
||||
return nil, ErrLessThanTwoGenerators
|
||||
}
|
||||
|
||||
if len(appSetGenerator.Matrix.Generators) > 2 {
|
||||
return nil, ErrMoreThanTwoGenerators
|
||||
}
|
||||
|
||||
res := []map[string]string{}
|
||||
|
||||
g0, err := m.getParams(appSetGenerator.Matrix.Generators[0], appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, a := range g0 {
|
||||
for _, b := range g1 {
|
||||
val, err := utils.CombineStringMaps(a, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, val)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
|
||||
matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: appSetBaseGenerator.List,
|
||||
Clusters: appSetBaseGenerator.Clusters,
|
||||
Git: appSetBaseGenerator.Git,
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
appSet)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("child generator returned an error on parameter generation: %v", err)
|
||||
}
|
||||
|
||||
if len(t) == 0 {
|
||||
return nil, fmt.Errorf("child generator generated no parameters")
|
||||
}
|
||||
|
||||
if len(t) > 1 {
|
||||
return nil, ErrMoreThenOneInnerGenerators
|
||||
}
|
||||
|
||||
return t[0].Params, nil
|
||||
}
|
||||
|
||||
const maxDuration time.Duration = 1<<63 - 1
|
||||
|
||||
func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
|
||||
res := maxDuration
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Matrix.Generators {
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
for _, g := range generators {
|
||||
temp := g.GetRequeueAfter(base)
|
||||
if temp < res && temp != NoRequeueAfter {
|
||||
found = true
|
||||
res = temp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
return res
|
||||
} else {
|
||||
return NoRequeueAfter
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func getMatrixGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MatrixGenerator, error) {
|
||||
if r.Matrix == nil {
|
||||
return nil, nil
|
||||
}
|
||||
matrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(r.Matrix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return matrix.ToMatrixGenerator(), nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Matrix.Template
|
||||
}
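GenerateParams in the matrix generator above builds a cartesian product: every param set from the first child generator is combined with every param set from the second. The sketch below shows only that product shape; it does not reproduce the conflict handling of utils.CombineStringMaps (it simply copies both maps into a new one), and the cluster/path values are illustrative.

package main

import "fmt"

func main() {
	g0 := []map[string]string{{"cluster": "staging"}, {"cluster": "production"}}
	g1 := []map[string]string{{"path": "apps/guestbook"}, {"path": "apps/helm-guestbook"}}

	res := []map[string]string{}
	for _, a := range g0 {
		for _, b := range g1 {
			combined := map[string]string{}
			for k, v := range a {
				combined[k] = v
			}
			for k, v := range b {
				combined[k] = v
			}
			res = append(res, combined)
		}
	}
	fmt.Println(len(res), res) // 4 combined param sets, one per (cluster, path) pair
}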