Compare commits

8 Commits

Author | SHA1 | Message | Date

argo-bot | 24b93197e0 | Bump version to 1.7.0 | 2020-08-25 18:47:27 +00:00
argo-bot | 5a0bb5cefc | Bump version to 1.7.0 | 2020-08-25 18:47:19 +00:00
May Zhang | 4d59273383 | fix: Badge links are not generating properly when using --rootpath (#4140) | 2020-08-25 10:06:31 -07:00
    * fix: Badge links are not generating properly when using --rootpath
    * fix: fix lint error
    * fix: use context.baseHref
Alexander Matyushentsev | 4f3537d274 | refactor: upgrade K8S client to v0.18.8 (#4149) | 2020-08-25 09:27:17 -07:00
May Zhang | 76e9e918d2 | fix: UI setting auto sync causes erroneous config (#4118) | 2020-08-25 09:27:14 -07:00
    * fix: UI setting auto sync causes erroneous config
    * fix: remove log
jannfis | b2decde4fe | fix: Make GnuPG keyring independent of user ID within container (#4136) | 2020-08-25 09:27:10 -07:00
    * fix: Make GnuPG keyring independent of user ID within container
    * Update unit test
argo-bot | 4728412cc3 | Bump version to 1.7.0-rc1 | 2020-08-15 19:20:12 +00:00
argo-bot | 26b9331820 | Bump version to 1.7.0-rc1 | 2020-08-15 19:20:03 +00:00
1926 changed files with 41915 additions and 216596 deletions

.circleci/config.yml Normal file (16 changed lines)

@@ -0,0 +1,16 @@
version: 2.1
jobs:
dummy:
docker:
- image: cimg/base:2020.01
steps:
- run:
name: Dummy step
command: |
echo "This is a dummy step to satisfy CircleCI"
workflows:
version: 2
workflow:
jobs:
- dummy

.circleci/config.yml.off Normal file (324 changed lines)

@@ -0,0 +1,324 @@
# CircleCI currently disabled in favor of GH actions
version: 2.1
commands:
prepare_environment:
steps:
- run:
name: Configure environment
command: |
set -x
echo "export GOCACHE=/tmp/go-build-cache" | tee -a $BASH_ENV
echo "export ARGOCD_TEST_VERBOSE=true" | tee -a $BASH_ENV
echo "export ARGOCD_TEST_PARALLELISM=4" | tee -a $BASH_ENV
echo "export ARGOCD_SONAR_VERSION=4.2.0.1873" | tee -a $BASH_ENV
configure_git:
steps:
- run:
name: Configure Git
command: |
set -x
# must be configured for tests to run
git config --global user.email you@example.com
git config --global user.name "Your Name"
echo "export PATH=/home/circleci/.go_workspace/src/github.com/argoproj/argo-cd/hack:\$PATH" | tee -a $BASH_ENV
echo "export GIT_ASKPASS=git-ask-pass.sh" | tee -a $BASH_ENV
setup_go_modules:
steps:
- run:
name: Run go mod download and populate vendor
command: |
go mod download
go mod vendor
save_coverage_info:
steps:
- persist_to_workspace:
root: .
paths:
- coverage.out
save_node_modules:
steps:
- persist_to_workspace:
root: ~/argo-cd
paths:
- ui/node_modules
save_go_cache:
steps:
- persist_to_workspace:
root: /tmp
paths:
- go-build-cache
attach_go_cache:
steps:
- attach_workspace:
at: /tmp
install_golang:
steps:
- run:
name: Install Golang v1.14.1
command: |
go get golang.org/dl/go1.14.1
[ -e /home/circleci/sdk/go1.14.1 ] || go1.14.1 download
go env
echo "export GOPATH=/home/circleci/.go_workspace" | tee -a $BASH_ENV
echo "export PATH=/home/circleci/sdk/go1.14.1/bin:\$PATH" | tee -a $BASH_ENV
jobs:
build:
docker:
- image: argoproj/argocd-test-tools:v0.5.0
working_directory: /go/src/github.com/argoproj/argo-cd
steps:
- prepare_environment
- checkout
- run: make build-local
- run: chmod -R 777 vendor
- run: chmod -R 777 ${GOCACHE}
- save_go_cache
codegen:
docker:
- image: argoproj/argocd-test-tools:v0.5.0
working_directory: /go/src/github.com/argoproj/argo-cd
steps:
- prepare_environment
- checkout
- attach_go_cache
- run: helm2 init --client-only
- run: make codegen-local
- run:
name: Check nothing has changed
command: |
set -xo pipefail
# This makes sure you ran `make pre-commit` before you pushed.
# We exclude the Swagger resources; CircleCI doesn't generate them correctly.
# When this fails, it will create a patch file you can apply locally to fix it.
# To troubleshoot builds: https://argoproj.github.io/argo-cd/developer-guide/ci/
git diff --exit-code -- . ':!Gopkg.lock' ':!assets/swagger.json' | tee codegen.patch
- store_artifacts:
path: codegen.patch
destination: .
test:
working_directory: /go/src/github.com/argoproj/argo-cd
docker:
- image: argoproj/argocd-test-tools:v0.5.0
steps:
- prepare_environment
- checkout
- configure_git
- attach_go_cache
- run: make test-local
- run:
name: Uploading code coverage
command: bash <(curl -s https://codecov.io/bash) -f coverage.out
- run:
name: Output of test-results
command: |
ls -l test-results || true
cat test-results/junit.xml || true
- save_coverage_info
- store_test_results:
path: test-results
- store_artifacts:
path: test-results
destination: .
lint:
working_directory: /go/src/github.com/argoproj/argo-cd
docker:
- image: argoproj/argocd-test-tools:v0.5.0
steps:
- prepare_environment
- checkout
- configure_git
- attach_vendor
- store_go_cache_docker
- run:
name: Run golangci-lint
command: ARGOCD_LINT_GOGC=10 make lint-local
- run:
name: Check that nothing has changed
command: |
gDiff=$(git diff)
if test "$gDiff" != ""; then
echo
echo "###############################################################################"
echo "golangci-lint has made automatic corrections to your code. Please check below"
echo "diff output and commit this to your local branch, or run make lint locally."
echo "###############################################################################"
echo
git diff
exit 1
fi
sonarcloud:
working_directory: /go/src/github.com/argoproj/argo-cd
docker:
- image: argoproj/argocd-test-tools:v0.5.0
environment:
NODE_MODULES: /go/src/github.com/argoproj/argo-cd/ui/node_modules
steps:
- prepare_environment
- checkout
- attach_workspace:
at: .
- run:
command: mkdir -p /tmp/cache/scanner
name: Create cache directory if it doesn't exist
- restore_cache:
keys:
- v1-sonarcloud-scanner-4.2.0.1873
- run:
command: |
set -e
VERSION=4.2.0.1873
SONAR_TOKEN=$SONAR_TOKEN
SCANNER_DIRECTORY=/tmp/cache/scanner
export SONAR_USER_HOME=$SCANNER_DIRECTORY/.sonar
OS="linux"
echo $SONAR_USER_HOME
if [[ ! -x "$SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/bin/sonar-scanner" ]]; then
curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$VERSION-$OS.zip
unzip -qq -o sonar-scanner-cli-$VERSION-$OS.zip -d $SCANNER_DIRECTORY
fi
chmod +x $SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/bin/sonar-scanner
chmod +x $SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/jre/bin/java
# Workaround for a possible bug in CircleCI
if ! echo $CIRCLE_PULL_REQUEST | grep https://github.com/argoproj; then
unset CIRCLE_PULL_REQUEST
unset CIRCLE_PULL_REQUESTS
fi
# Explicitly set NODE_MODULES
export NODE_MODULES=/go/src/github.com/argoproj/argo-cd/ui/node_modules
export NODE_PATH=/go/src/github.com/argoproj/argo-cd/ui/node_modules
$SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/bin/sonar-scanner
name: SonarCloud
- save_cache:
key: v1-sonarcloud-scanner-4.2.0.1873
paths:
- /tmp/cache/scanner
e2e:
working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
machine:
image: ubuntu-1604:201903-01
environment:
ARGOCD_FAKE_IN_CLUSTER: "true"
ARGOCD_SSH_DATA_PATH: "/tmp/argo-e2e/app/config/ssh"
ARGOCD_TLS_DATA_PATH: "/tmp/argo-e2e/app/config/tls"
ARGOCD_E2E_K3S: "true"
steps:
- run:
name: Install and start K3S v0.5.0
command: |
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
kubectl version
environment:
INSTALL_K3S_EXEC: --docker
INSTALL_K3S_VERSION: v0.5.0
- prepare_environment
- checkout
- run:
name: Fix permissions on filesystem
command: |
mkdir -p /home/circleci/.go_workspace/pkg/mod
chmod -R 777 /home/circleci/.go_workspace/pkg/mod
mkdir -p /tmp/go-build-cache
chmod -R 777 /tmp/go-build-cache
- attach_go_cache
- run:
name: Update kubectl configuration for container
command: |
ipaddr=$(ifconfig $IFACE |grep "inet " | awk '{print $2}')
if echo $ipaddr | grep -q 'addr:'; then
ipaddr=$(echo $ipaddr | awk -F ':' '{print $2}')
fi
test -d $HOME/.kube || mkdir -p $HOME/.kube
kubectl config view --raw | sed -e "s/127.0.0.1:6443/${ipaddr}:6443/g" -e "s/localhost:6443/${ipaddr}:6443/g" > $HOME/.kube/config
environment:
IFACE: ens4
- run:
name: Start E2E test server
command: make start-e2e
background: true
environment:
DOCKER_SRCDIR: /home/circleci/.go_workspace/src
ARGOCD_E2E_TEST: "true"
ARGOCD_IN_CI: "true"
GOPATH: /home/circleci/.go_workspace
- run:
name: Wait for API server to become available
command: |
count=1
until curl -v http://localhost:8080/healthz; do
sleep 10;
if test $count -ge 60; then
echo "Timeout"
exit 1
fi
count=$((count+1))
done
- run:
name: Run E2E tests
command: |
make test-e2e
environment:
ARGOCD_OPTS: "--plaintext"
ARGOCD_E2E_K3S: "true"
IFACE: ens4
DOCKER_SRCDIR: /home/circleci/.go_workspace/src
GOPATH: /home/circleci/.go_workspace
- store_test_results:
path: test-results
- store_artifacts:
path: test-results
destination: .
ui:
docker:
- image: node:11.15.0
working_directory: ~/argo-cd/ui
steps:
- checkout:
path: ~/argo-cd/
- restore_cache:
keys:
- yarn-packages-v4-{{ checksum "yarn.lock" }}
- run: yarn install --frozen-lockfile --ignore-optional --non-interactive
- save_cache:
key: yarn-packages-v4-{{ checksum "yarn.lock" }}
paths: [~/.cache/yarn, node_modules]
- run: yarn test
- run: ./node_modules/.bin/codecov -p ..
- run: NODE_ENV='production' yarn build
- run: yarn lint
- save_node_modules
orbs:
sonarcloud: sonarsource/sonarcloud@1.0.1
workflows:
version: 2
workflow:
jobs:
- build
- test:
requires:
- build
- codegen:
requires:
- build
- ui:
requires:
- build
- sonarcloud:
context: SonarCloud
requires:
- test
- ui
- e2e:
requires:
- build

@@ -7,7 +7,6 @@ ignore:
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
- "test/.*"
coverage:
status:
# we've found this not to be useful

@@ -6,7 +6,8 @@ labels: 'bug'
assignees: ''
---
<!-- If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a question in argocd slack [channel](https://argoproj.github.io/community/join-slack). -->
If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a
question in argocd slack [channel](https://argoproj.github.io/community/join-slack).
Checklist:
@@ -16,19 +17,19 @@ Checklist:
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
A clear and concise description of what the bug is.
**To Reproduce**
<!-- A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue. -->
A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
A clear and concise description of what you expected to happen.
**Screenshots**
<!-- If applicable, add screenshots to help explain your problem. -->
If applicable, add screenshots to help explain your problem.
**Version**

@@ -1,12 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Have you read the docs?
url: https://argo-cd.readthedocs.io/
about: Much help can be found in the docs
- name: Ask a question
url: https://github.com/argoproj/argo-cd/discussions/new
about: Ask a question or start a discussion about Argo CD
- name: Chat on Slack
url: https://argoproj.github.io/community/join-slack
about: Maybe chatting with the community can help

@@ -1,17 +1,7 @@
Note on DCO:
If the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.
Checklist:
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
* [ ] Does this PR require documentation updates?
* [ ] I've updated documentation as required by this PR.
* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).
* [ ] I've signed the CLA and my build is green ([troubleshooting builds](https://argoproj.github.io/argo-cd/developer-guide/ci/)).

@@ -10,28 +10,27 @@ on:
branches:
- 'master'
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build-docker:
name: Build Docker image
runs-on: ubuntu-latest
if: github.head_ref != ''
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Build Docker image
run: |
make image
check-go:
name: Ensure Go modules synchronicity
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
go-version: '1.14.2'
- name: Download all Go modules
run: |
go mod download
@@ -45,13 +44,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
go-version: '1.14.2'
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -62,43 +61,33 @@ jobs:
run: make build-local
lint-go:
permissions:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
uses: actions/checkout@v2
- name: Run golangci-lint
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
uses: golangci/golangci-lint-action@v1
with:
version: v1.45.2
args: --timeout 10m --exclude SA5011 --verbose
version: v1.26
args: --timeout 5m
test-go:
name: Run unit tests for Go packages
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
go-version: '1.14.2'
- name: Install required packages
run: |
sudo apt-get install git -y
@@ -110,23 +99,17 @@ jobs:
run: |
git fetch --prune --no-tags --depth=1 origin +refs/heads/*:refs/remotes/origin/*
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
run: echo "::add-path::/home/runner/go/bin"
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
run: echo "::add-path::/usr/local/bin"
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
@@ -137,98 +120,34 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@v2
with:
name: test-results
path: test-results/
test-go-race:
name: Run unit tests with -race, for Go packages
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
run: |
sudo apt-get install git -y
- name: Switch to temporal branch so we re-attach head
run: |
git switch -c temporal-pr-branch
git status
- name: Fetch complete history for blame information
run: |
git fetch --prune --no-tags --depth=1 origin +refs/heads/*:refs/remotes/origin/*
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: race-results
path: test-results/
codegen:
name: Check changes to generated code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
go-version: '1.14.2'
- name: Create symlink in GOPATH
run: |
mkdir -p ~/go/src/github.com/argoproj
cp -a ../argo-cd ~/go/src/github.com/argoproj
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
run: echo "::add-path::/usr/local/bin"
- name: Add ~/go/bin to PATH
run: echo "::add-path::/home/runner/go/bin"
- name: Download & vendor dependencies
run: |
# We need to vendor go modules for codegen yet
@@ -240,10 +159,9 @@ jobs:
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
- name: Initialize local Helm
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
helm2 init --client-only
- name: Run codegen
run: |
set -x
@@ -262,14 +180,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup NodeJS
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
uses: actions/setup-node@v1
with:
node-version: '12.18.4'
node-version: '11.15.0'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -282,8 +200,6 @@ jobs:
yarn build
env:
NODE_ENV: production
NODE_ONLINE_ENV: online
HOST_ARCH: amd64
working-directory: ui/
- name: Run ESLint
run: yarn lint
@@ -299,12 +215,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -315,16 +231,16 @@ jobs:
run: |
mkdir -p test-results
- name: Get code coverage artifact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@v2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@v2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
uses: codecov/codecov-action@v1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
@@ -358,9 +274,6 @@ jobs:
test-e2e:
name: Run end-to-end tests
runs-on: ubuntu-latest
strategy:
matrix:
k3s-version: [v1.23.3, v1.22.6, v1.21.2]
needs:
- build-go
env:
@@ -373,29 +286,16 @@ jobs:
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
ARGOCD_SERVER: "127.0.0.1:8088"
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
# ubuntu-22.04 comes with kubectl, but the version is not pinned. The version as of 2022-12-05 is 1.26.0 which
# breaks the `TestNamespacedResourceDiffing` e2e test. So we'll pin to 1.25 and then fix the underlying issue.
- name: Install kubectl
run: |
rm /usr/local/bin/kubectl
curl -LO https://dl.k8s.io/release/v1.25.4/bin/linux/amd64/kubectl
mv kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
go-version: '1.14.2'
- name: Install K3S
env:
INSTALL_K3S_VERSION: ${{ matrix.k3s-version }}+k3s1
INSTALL_K3S_VERSION: v0.5.0
run: |
set -x
curl -sfL https://get.k3s.io | sh -
@@ -405,23 +305,18 @@ jobs:
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Add ./dist to PATH
run: |
echo "$(pwd)/dist" >> $GITHUB_PATH
run: echo "::add-path::/usr/local/bin"
- name: Add ~/go/bin to PATH
run: echo "::add-path::/home/runner/go/bin"
- name: Download Go dependencies
run: |
go mod download
go install github.com/mattn/goreman@latest
go get github.com/mattn/goreman
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
@@ -431,9 +326,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull quay.io/dexidp/dex:v2.22.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.7-alpine
docker pull redis:5.0.8-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
@@ -446,11 +341,11 @@ jobs:
# port 8080 which is not visible in netstat -tulpen, but still there
# with a HTTP listener. We have API server listening on port 8088
# instead.
make start-e2e-local 2>&1 | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g" > /tmp/e2e-server.log &
make start-e2e-local &
count=1
until curl -f http://127.0.0.1:8088/healthz; do
sleep 10;
if test $count -ge 180; then
if test $count -ge 60; then
echo "Timeout"
exit 1
fi
@@ -460,9 +355,3 @@ jobs:
run: |
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
if: ${{ failure() }}

@@ -2,38 +2,32 @@ name: "Code scanning - action"
on:
push:
# Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot
branches-ignore:
- 'dependabot/**'
pull_request:
schedule:
- cron: '0 19 * * 0'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
CodeQL-Build:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd'
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
@@ -41,7 +35,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -55,4 +49,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
uses: github/codeql-action/analyze@v1

.github/workflows/gh-pages.yaml vendored Normal file (31 changed lines)

@@ -0,0 +1,31 @@
name: Deploy
on:
push:
branches:
- master
pull_request:
branches:
- 'master'
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.x
- name: build
run: |
pip install mkdocs==1.0.4 mkdocs_material==4.1.1
mkdocs build
mkdir ./site/.circleci && echo '{version: 2, jobs: {build: {branches: {ignore: gh-pages}}}}' > ./site/.circleci/config.yml
- name: deploy
if: ${{ github.event_name == 'push' }}
uses: peaceiris/actions-gh-pages@v2.5.0
env:
PERSONAL_TOKEN: ${{ secrets.PERSONAL_TOKEN }}
PUBLISH_BRANCH: gh-pages
PUBLISH_DIR: ./site

@@ -4,150 +4,47 @@ on:
push:
branches:
- master
pull_request:
branches:
- master
types: [ labeled, unlabeled, opened, synchronize, reopened ]
env:
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
- uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
go-version: '1.14.1'
- uses: actions/checkout@master
with:
path: src/github.com/argoproj/argo-cd
# get image tag
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
working-directory: ./src/github.com/argoproj/argo-cd
id: image
# login
# build
- run: |
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
docker images -a --format "{{.ID}}" | xargs -I {} docker rmi {}
make image DEV_IMAGE=true DOCKER_PUSH=false IMAGE_NAMESPACE=docker.pkg.github.com/argoproj/argo-cd IMAGE_TAG=${{ steps.image.outputs.tag }}
working-directory: ./src/github.com/argoproj/argo-cd
# publish
- run: |
docker login docker.pkg.github.com --username $USERNAME --password $PASSWORD
docker push docker.pkg.github.com/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }}
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
# build
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Setup cache for argocd-ui docker layer
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-single-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-single-buildx
- name: Build cache for argocd-ui stage
uses: docker/build-push-action@v2
with:
context: ./src/github.com/argoproj/argo-cd
target: argocd-ui
push: false
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
- name: Run non-container Snyk scans
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
npm install -g snyk
# Run with high threshold to fail build.
snyk test --org=argoproj --all-projects --exclude=docs,site --severity-threshold=high --policy-path=.snyk
snyk iac test manifests/install.yaml --org=argoproj --severity-threshold=high --policy-path=.snyk
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
then
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
--cache-from "type=local,src=/tmp/.buildx-cache" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd
- name: Run container Snyk scan
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
snyk container test quay.io/argoproj/argocd:latest --org=argoproj --file=Dockerfile --severity-threshold=high
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Clean up build cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ github.event_name == 'push' }}
# deploy
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
if: github.event_name == 'push'
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
docker run -v $(pwd):/src -w /src --rm -t lyft/kustomizer:v3.3.0 kustomize edit set image argoproj/argocd=docker.pkg.github.com/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }}
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
if: github.event_name == 'push'
working-directory: argoproj-deployments/argocd
# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-GitHub-Package-Registry/m-p/41202/thread-id/9811
# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-Github-Package-Registry/m-p/41202/thread-id/9811

@@ -2,40 +2,32 @@ name: Create ArgoCD release
on:
push:
tags:
- "release-v*"
- "!release-v1.5*"
- "!release-v1.4*"
- "!release-v1.3*"
- "!release-v1.2*"
- "!release-v1.1*"
- "!release-v1.0*"
- "!release-v0*"
env:
GOLANG_VERSION: '1.18'
permissions:
contents: read
- 'release-v*'
- '!release-v1.5*'
- '!release-v1.4*'
- '!release-v1.3*'
- '!release-v1.2*'
- '!release-v1.1*'
- '!release-v1.0*'
- '!release-v0*'
jobs:
prepare-release:
permissions:
contents: write # To push changes to release branch
name: Perform automatic release on trigger ${{ github.ref }}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
# The name of the tag as supplied by the GitHub event
SOURCE_TAG: ${{ github.ref }}
# The image namespace where Docker image will be published to
IMAGE_NAMESPACE: quay.io/argoproj
IMAGE_NAMESPACE: argoproj
# Whether to create & push image and release assets
DRY_RUN: false
# Whether a draft release should be created, instead of public one
DRAFT_RELEASE: false
# The name of the repository containing tap formulae
TAP_REPOSITORY: argoproj/homebrew-tap
# Whether to update homebrew with this release as well
# Set RELEASE_HOMEBREW_TOKEN secret in repository for this to work - needs
# access to public repositories
# access to public repositories (or homebrew-tap repo specifically)
UPDATE_HOMEBREW: false
# Name of the GitHub user for Git config
GIT_USERNAME: argo-bot
@@ -43,7 +35,7 @@ jobs:
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
uses: actions/checkout@v2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -54,7 +46,7 @@ jobs:
# Target version must match major.minor.patch and optional -rcX suffix
# where X must be a number.
TARGET_VERSION=${SOURCE_TAG#*release-v}
if ! echo "${TARGET_VERSION}" | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then
if ! echo ${TARGET_VERSION} | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then
echo "::error::Target version '${TARGET_VERSION}' is malformed, refusing to continue." >&2
exit 1
fi
@@ -86,10 +78,10 @@ jobs:
fi
# Make the variables available in follow-up steps
echo "TARGET_VERSION=${TARGET_VERSION}" >> $GITHUB_ENV
echo "TARGET_BRANCH=${TARGET_BRANCH}" >> $GITHUB_ENV
echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV
echo "PRE_RELEASE=${PRE_RELEASE}" >> $GITHUB_ENV
echo "::set-env name=TARGET_VERSION::${TARGET_VERSION}"
echo "::set-env name=TARGET_BRANCH::${TARGET_BRANCH}"
echo "::set-env name=RELEASE_TAG::${RELEASE_TAG}"
echo "::set-env name=PRE_RELEASE::${PRE_RELEASE}"
- name: Check if our release tag has a correct annotation
run: |
@@ -100,7 +92,7 @@ jobs:
echo "=========== BEGIN COMMIT MESSAGE ============="
git show ${SOURCE_TAG}
echo "============ END COMMIT MESSAGE =============="
# Quite dirty hack to get the release notes from the annotated tag
# into a temporary file.
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)
@@ -111,16 +103,16 @@ jobs:
# Whatever is in commit history for the tag, we only want that
# annotation from our tag. We discard everything else.
if test "$begin" = "false"; then
if echo "$line" | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi
if echo $line | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi
continue
fi
if test "$prefix" = "true"; then
if test -z "$line"; then prefix=false; fi
else
if echo "$line" | egrep -q '^commit [0-9a-f]+'; then
if echo $line | egrep -q '^commit [0-9a-f]+'; then
break
fi
echo "$line" >> ${RELEASE_NOTES}
echo $line >> ${RELEASE_NOTES}
fi
done
@@ -144,12 +136,12 @@ jobs:
# We store path to temporary release notes file for later reading, we
# need it when creating release.
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
echo "::set-env name=RELEASE_NOTES::$RELEASE_NOTES"
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
uses: actions/setup-go@v1
with:
go-version: ${{ env.GOLANG_VERSION }}
go-version: '1.14.2'
- name: Setup Git author information
run: |
@@ -177,10 +169,7 @@ jobs:
run: |
set -ue
make install-codegen-tools-local
# We install kustomize in the dist directory
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
helm2 init --client-only
make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
@@ -191,60 +180,31 @@ jobs:
echo "Creating release ${RELEASE_TAG}"
git tag ${RELEASE_TAG}
- name: Login to docker repositories
env:
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
QUAY_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Build and push Docker image for release
- name: Build Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make image IMAGE_TAG="${TARGET_VERSION}" DOCKER_PUSH=false
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
- name: Push docker image to repository
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
run: |
set -ue
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker push ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
if: ${{ env.DRY_RUN != 'true' }}
- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
uses: juliangruber/read-file-action@v1
with:
path: ${{ env.RELEASE_NOTES }}
- name: Push changes to release branch
@@ -253,8 +213,8 @@ jobs:
git push origin ${TARGET_BRANCH}
git push origin ${RELEASE_TAG}
- name: Dry run GitHub release
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
- name: Create GitHub release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release
@@ -264,74 +224,66 @@ jobs:
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }}
if: ${{ env.DRY_RUN == 'true' }}
- name: Generate SBOM (spdx)
id: spdx-builder
env:
# defines the spdx/spdx-sbom-generator version to use.
SPDX_GEN_VERSION: v0.0.13
# defines the sigs.k8s.io/bom version to use.
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
yarn install --cwd ./ui
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION
# Generate SPDX for project dependencies analyzing package managers
for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g")
do
generator -p $folder -o /tmp
done
# Generate SPDX for binaries analyzing the docker image
if [[ ! -z $DOCKER_IMAGE ]]; then
bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE
fi
cd /tmp && tar -zcf sbom.tar.gz *.spdx
if: ${{ env.DRY_RUN != 'true' }}
- name: Sign sbom
run: |
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Create GitHub release
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
- name: Upload argocd-linux-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: ${{ env.RELEASE_TAG }}
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
files: |
dist/argocd-*
/tmp/sbom.tar.gz
/tmp/sbom.tar.gz.sig
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-linux-amd64
asset_name: argocd-linux-amd64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Update homebrew formula
- name: Upload argocd-darwin-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-darwin-amd64
asset_name: argocd-darwin-amd64
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Upload argocd-windows-amd64 binary to release assets
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./dist/argocd-windows-amd64.exe
asset_name: argocd-windows-amd64.exe
asset_content_type: application/octet-stream
if: ${{ env.DRY_RUN != 'true' }}
- name: Check out homebrew tap repository
uses: actions/checkout@v2
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd
repository: ${{ env.TAP_REPOSITORY }}
path: homebrew-tap
fetch-depth: 0
token: ${{ env.HOMEBREW_TOKEN }}
if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }}
- name: Update homebrew tap formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
run: |
set -ue
cd homebrew-tap
./update.sh argocd ${TARGET_VERSION}
git commit -am "Update argocd to ${TARGET_VERSION}"
git push
cd ..
rm -rf homebrew-tap
if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }}
- name: Delete original request tag from repository
run: |
set -ue
git push --delete origin ${SOURCE_TAG}
if: ${{ always() }}
if: ${{ always() }}

.gitignore vendored (13 changed lines)

@@ -2,9 +2,7 @@
.idea/
.DS_Store
vendor/
dist/*
ui/dist/app/*
!ui/dist/app/gitkeep
dist/
site/
*.iml
# delve debug binaries
@@ -14,12 +12,3 @@ coverage.out
test-results
.scannerwork
.scratch
node_modules/
.kube/
./test/cmp/*.sock
# ignore built binaries
cmd/argocd/argocd
cmd/argocd-application-controller/argocd-application-controller
cmd/argocd-repo-server/argocd-repo-server
cmd/argocd-server/argocd-server

.gitpod.Dockerfile vendored (17 changed lines)

@@ -1,17 +0,0 @@
FROM gitpod/workspace-full
USER root
RUN curl -o /usr/local/bin/kubectl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
chmod +x /usr/local/bin/kubectl
RUN curl -L https://go.kubebuilder.io/dl/2.3.1/$(go env GOOS)/$(go env GOARCH) | \
tar -xz -C /tmp/ && mv /tmp/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH) /usr/local/kubebuilder
RUN apt-get install redis-server -y
RUN go install github.com/mattn/goreman@latest
USER gitpod
ENV ARGOCD_REDIS_LOCAL=true
ENV KUBECONFIG=/tmp/kubeconfig

@@ -1,6 +0,0 @@
image:
file: .gitpod.Dockerfile
tasks:
- init: make mod-download-local dep-ui-local && GO111MODULE=off go install github.com/mattn/goreman@latest
command: make start-test-k8s

.golangci.yml Normal file (22 changed lines)

@@ -0,0 +1,22 @@
run:
timeout: 2m
skip-files:
- ".*\\.pb\\.go"
skip-dirs:
- pkg/client/
- vendor/
linters:
enable:
- vet
- deadcode
- goimports
- varcheck
- structcheck
- ineffassign
- unconvert
- unparam
linters-settings:
goimports:
local-prefixes: github.com/argoproj/argo-cd
service:
golangci-lint-version: 1.21.0

@@ -1,7 +0,0 @@
version: 2
formats: all
mkdocs:
fail_on_warning: false
python:
install:
- requirements: docs/requirements.txt

.snyk (22 changed lines)

@@ -1,22 +0,0 @@
# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
version: v1.22.1
# ignores vulnerabilities until expiry date; change duration by modifying expiry date
ignore:
SNYK-JS-ANSIREGEX-1583908:
- '*':
reason: >-
Code is only run client-side in the swagger-ui endpoint. No risk of
server-side DoS.
SNYK-CC-K8S-44:
- 'manifests/core-install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
- 'manifests/install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
SNYK-JS-MOMENT-2440688:
- '*':
reason: >-
Code is only run client-side. No risk of directory traversal.
patch: {}

@@ -1,620 +1,6 @@
# Changelog
## v2.4.0 (Unreleased)
### Web Terminal In Argo CD UI
This feature enables engineers to start a shell in a running application container without leaving the web interface. Just find the required Kubernetes
Pod on the Application Details page, click on it, and select the Terminal tab. The shell starts automatically, lets you execute the required
commands, and helps you troubleshoot the application state.
### Access Control For Pod Logs & Web Terminal
Argo CD is used to manage the critical infrastructure of multiple organizations, which makes security the top priority of the project. We've listened to
your feedback and introduced additional access control settings that control access to Kubernetes Pod logs and the new Web Terminal feature.
#### Pod Logs UI
Since 2.4.9, the LOGS tab in the pod view is visible in the UI only to users with an explicit allow get logs policy.
#### Known pod logs UI issue prior to 2.4.9
When a user without an explicit allow get logs policy presses the "LOGS" tab in the pod view, a red "unable to load data: Internal error" message appears at the bottom of the screen and "Failed to load data, please try again" is displayed.
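
For illustration only, here is a minimal sketch of what such grants might look like in the standard argocd-rbac-cm ConfigMap; the role and group names below are placeholders, not part of this changelog:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-rbac-cm
  namespace: argocd
data:
  policy.csv: |
    # placeholder role allowed to read pod logs in any project/application
    p, role:log-viewer, logs, get, */*, allow
    # placeholder role allowed to open the web terminal (exec into pods)
    p, role:terminal-user, exec, create, */*, allow
    # hypothetical SSO group bound to both roles
    g, example-sre-team, role:log-viewer
    g, example-sre-team, role:terminal-user
```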
### OpenTelemetry Tracing Integration
The new feature allows emitting richer telemetry data that might make identifying performance bottlenecks easier. It is available for the argocd-server
and argocd-repo-server components and can be enabled using the --otlp-address flag.
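
A sketch of one way to pass the flag (the collector endpoint and the patch layout are assumptions, not taken from this changelog), for example as a strategic-merge patch on the argocd-server Deployment:

```yaml
# Hypothetical patch: replaces the argocd-server command to add --otlp-address.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: argocd-server
  namespace: argocd
spec:
  template:
    spec:
      containers:
        - name: argocd-server
          command:
            - argocd-server
            - --otlp-address=otel-collector.observability:4317   # assumed OTLP endpoint
```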
### Power PC and IBM Z Support
The list of supported architectures has been expanded, and now includes IBM Z (s390x) and PowerPC (ppc64le). Starting with the v2.4 release the official quay.io
repository is going to have images for amd64, arm64, ppc64le, and s390x architectures.
### Other Notable Changes
Overall, the v2.4 release includes more than 300 commits from nearly 90 contributors. Here is a short sample of the contributions:
* Enforce the deployment to remote clusters only
* Native support of GCP authentication for GKE
* Secured Redis connection
* ApplicationSet Gitea support
## v2.3.3 (2022-03-29)
- fix: prevent excessive repo-server disk usage for large repos (#8845) (#8897)
- fix: Set QPS and burst rate for resource ops client (#8915)
## v2.3.2 (2022-03-22)
- fix: application resource APIs must enforce project restrictions
## v2.3.1 (2022-03-10)
- fix: Retry checkbox unchecked unexpectedly; Sync up with YAML (#8682) (#8720)
- chore: Bump stable version of application set addon (#8744)
- fix: correct jsonnet paths resolution (#8721)
- fix(ui): Applications page incorrectly resets to tiles view. Fixes #8702 (#8718)
## v2.3.0 (2022-03-05)
### Argo CD ApplicationSet and Notifications are now part of Argo CD
Two popular [Argoproj Labs](https://github.com/argoproj-labs) projects [Argo CD ApplicationSet](https://github.com/argoproj/applicationset) and
[Argo CD Notifications](https://github.com/argoproj-labs/argocd-notifications) are now part of Argo CD! The default Argo CD installation manifests now
bundle both projects out of the box. Going forward you can expect more tightened integration of these projects into Argo CD.
### New sync and diff strategies
Users can now configure the Application resource to instruct Argo CD to consider the ignore difference setup during the sync process.
In order to do so, add the new sync option RespectIgnoreDifferences=true in the Application resource. Once the sync option is added,
Argo CD won't change ignored fields during the syncing process.
Configuring ignored fields is also easier now. Instead of listing fields one by one users can now leverage the
managedFields metadata to instruct Argo CD about trusted managers and automatically ignore any fields owned by them. A new diff customization
(managedFieldsManagers) is now available allowing users to specify managers the application should trust and to ignore all fields owned by those managers.
Read more about these changes at [New sync and diff strategies in ArgoCD](https://blog.argoproj.io/new-sync-and-diff-strategies-in-argocd-44195d3f8b8c) blog post.
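
As an illustration of how these pieces fit together, here is a minimal sketch assuming the fields described above; the application name, repository URL, and Deployment target are placeholders, not taken from this changelog:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app            # placeholder application name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/example-repo.git   # placeholder repo
    path: manifests
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: example
  syncPolicy:
    syncOptions:
      # honor the ignoreDifferences setup during sync, not only during diffing
      - RespectIgnoreDifferences=true
  ignoreDifferences:
    - group: apps
      kind: Deployment
      # trust fields owned by these managers instead of listing JSON paths
      managedFieldsManagers:
        - kube-controller-manager
```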
### ARM Images
An officially supported ARM 64 image is now available. Enjoy running Argo CD on your Raspberry Pi! Additionally, the image size was reduced by nearly 50%
and is now only 200MB. The ARM version of the `argocd` CLI is also available and published as a GitHub release artifact.
### Compact Tree View And Click Application Navigation
The application details page now supports compact application resources tree visualization. Using the "Group Nodes" button, you can collapse the similar resources
into a single group node to remove the clutter and make it easier to understand the state of application resources. You still can get detailed information about the collapsed resources by clicking on the group node. The list of collapsed resources will be available in a sliding panel. Compact resource tree is still too big?
You can use the zoom in and zoom out feature to make it smaller - or even larger!
You no longer need to move back and forth between the application details page and the application list page. Instead you can navigate directly to the required application by clicking the search icon in the application details page title.
### Upgraded Config Management Tools
Both bundled Helm and Kustomize binaries have been upgraded to the latest versions. Kustomize has been upgraded from 4.2.0 to 4.4.1 and Helm has been upgraded from 3.7.1 to 3.8.0.
### Bug Fixes and Performance Enhancements
* Config management tools enhancements:
* The skipCrds flag and ability to ignore missing values files for Helm (#8012, #8003)
* Additional environment variables for Kustomize (#8096)
* Argo CD CLI follows the XDG Base directory standard (#7638)
* Redis is no longer used during SSO login (#8241)
### Features
- feat: Add app list and details page views to navigation history (#7776) (#7937)
- feat: Add skipCrds flag for helm charts (#8012)
- feat: Add visual indicator for newly created pods (#8006)
- feat: Added a new Helm option ignoreMissingValueFiles (#7767) (#8003)
- feat: Allow configuring system wide ignore differences for all resources (#8224)
- feat: Allow escaping dollar in Envsubst (#7961)
- feat: Allow external links on Application (#3487) (#8231)
- feat: Allow selecting application on detail page (#8176)
- feat: Bundle applicationset-controller with argocd (#8148)
- feat: Enable specifying root ca for oidc (#6712)
- feat: Expose ARGOCD_APP_NAME to the `kustomize build` command (#8096)
- feat: Ignore differences owned by trusted managers from managedFields (#7869)
- feat: New sync option to use ignore diff configs during sync (#8078)
- feat: Provide address flag for admin dashboard command (#8095)
- feat: Store "Group Nodes" button state in application details preferences (#8036)
- feat: Support specifying cluster by name in addition to API server URL in Cluster API (#8077)
- feat: Support XDG Base directory standard (#7638) (#7791)
- feat: Use encrypted cookie to store OAuth2 state nonce (instead of redis) (#8241)
- feat: Build images on PR and conditionally build arm64 image on push (#8108)
### Bug Fixes
- fix: Add "Restarting MinIO" status to MiniO Tenant health check (#8191)
- fix: Add all resources in list view (#7295)
- fix: Adding pagination to grouped nodes sliding panel#7837 (#7915)
- fix: Allow all resources to add external links (#7923)
- fix: Always call ValidateDestination (#7976)
- fix: Application exist panic when execute api call (#8188)
- fix: Application-icons-alignment (#8054)
- fix: Controller panics if resource manifest has incorrect annotation (#8022)
- fix: Correctly handle project field during partial cluster update (#7994)
- fix: Default value for retry validation #8055 (#8064)
- fix: Fix a possible crash when parsing RBAC (#8165)
- fix: Grouped node list missing resources on Compact resources view #8014 (#8018)
- fix: Issue with headless installation (#7958)
- fix: Issue with project scoped resources (#8048)
- fix: Kubernetes labels normalization for Prometheus (#7925)
- fix: Nested Refresh dropdown does not work on Application Details page #1524 (#7950)
- fix: Network line colors and menu icon alignment (#8059)
- fix: Opening app details shows UI error on some apps (#8016) (#8019)
- fix: Parse to correct uint32 type (#8177)
- fix: Prevent possible nil-pointer deref in normalizer (#8185)
- fix: Prevent possible out-of-bounds access when loading policies (#8186)
- fix: Provide a semantic version parsed version for KUBE_VERSION (#8250)
- fix: Refreshing label toast (#7979)
- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971)
- fix: Retry disabled text (#8004)
- fix: Route health check stuck in 'Progressing' (#8170)
- fix: Sync window panel is crashed if resource name not contain letters (#8053)
- fix: Targetervision compatible without prefix refs/heads or refs/tags (#7939)
- fix: Trailing line in Filter Dropdown Menus #7821 (#8001)
- fix: Webhook URL matching edge cases (#7981)
- fix(ui): Use consistent case for diff modes (#7945)
- fix: Use gRPC timeout for sidecar CMPs (#8131) (#8236)
### Other
- chore: Bump go-jsonnet to v0.18.0 (#8011)
- chore: Escape proj in regex (#7985)
- chore: Exclude argocd-server rbac for core-install (#8234)
- chore: Log out the resource triggering reconciliation (#8192)
- chore: Migrate to use golang-jwt/jwt v4.2.0 (#8136)
- chore: Move resolveRevision from api-server to repo-server (#7966)
- chore: Update notifications version (#8267)
- chore: Update slack version (#8299)
- chore: Update to Redis 6.2.4 (#8157)
- chore: Upgrade awscli to 2.4.6 and remove python deps (#7947)
- chore: Upgrade base image to ubuntu:21.10 (#8230)
- chore: Upgrade dex to v2.30.2 (https://github.com/dexidp/dex/issues/2326) (#8237)
- chore: Upgrade gitops engine (#8288)
- chore: Upgrade golang to 1.17.6 (#8229)
- chore: Upgrade helm to most recent version (v3.7.2) (#8226)
- chore: Upgrade k8s client to v1.23 (#8213)
- chore: Upgrade kustomize to most recent version (v4.4.1) (#8227)
- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133)
- refactor: Move project filtering to server side (#8102)
## v2.2.3 (2022-01-18)
- fix: Application exist panic when execute api call (#8188)
- fix: Route health check stuck in 'Progressing' (#8170)
- refactor: Introduce 'byClusterName' secret index to speedup cluster server URL lookup (#8133)
- chore: Update to Redis 6.2.4 (#8157) (#8158)
## v2.2.2 (2021-12-31)
- fix: Issue with project scoped resources (#8048)
- fix: Escape proj in regex (#7985)
- fix: Default value for retry validation #8055 (#8064)
- fix: Sync window panel is crashed if resource name not contain letters (#8053)
- fix: Upgrade github.com/argoproj/gitops-engine to v0.5.2
- fix: Retry disabled text (#8004)
- fix: Opening app details shows UI error on some apps (#8016) (#8019)
- fix: Correctly handle project field during partial cluster update (#7994)
- fix: Cluster API does not support updating labels and annotations (#7901)
## v2.2.1 (2021-12-16)
- fix: Resource details page crashes when resource is not deployed and hide managed fields is selected (#7971)
- fix: Issue with headless installation (#7958)
- fix: Nil pointer (#7905)
## v2.2.0 (2021-12-14)
> [Upgrade instructions](./docs/operator-manual/upgrading/2.1-2.2.md)
### Project Scoped repositories and clusters
Project-scoped repositories and clusters simplify registering repository and cluster credentials.
Instead of requiring operators to set up in advance all clusters and Git repositories that can be used, developers can now do
this on their own in a self-service manner.
### Config Management Plugins V2
Config Management Plugins V2 is a set of enhancements to the existing config management plugins feature.
The list includes an improved installation experience, the ability to package a plugin into a separate image, and
improved plugin manifest discovery.
### Resource tracking
Argo CD has traditionally tracked the resources it manages using the well-known "app.kubernetes.io/instance" property.
While this property works well in simple scenarios, it also has several limitations. Argo CD now allows you to use
a new annotation (argocd.argoproj.io/tracking-id) for tracking your resources. Using this annotation is a much more flexible approach,
as there are no conflicts with other Kubernetes tools, and you can easily install multiple Argo CD instances on the same clusters.
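A minimal sketch of opting into annotation-based tracking: the annotation name comes from the notes above, while the `application.resourceTrackingMethod` key in `argocd-cm` and its value are assumptions based on the Argo CD resource-tracking documentation, not something stated here.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd   # assumed default installation namespace
  labels:
    app.kubernetes.io/part-of: argocd
data:
  # assumption: switches tracking from the "app.kubernetes.io/instance" label
  # to the argocd.argoproj.io/tracking-id annotation described above
  application.resourceTrackingMethod: annotation
```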
### Bug Fixes and Performance Enhancements
* The Argo CD API server now caches RBAC checks, which significantly improves the GET /api/v1/applications API performance (#7587)
* Argo CD RBAC supports regex matches (#7165)
* Health check support for KubeVirt (#7176), Cassandra (#7017), Openshift Route (#7112), DeploymentConfig (#7114), Confluent (#6957) and SparkApplication (#7434) CRDs.
* Persistent banner (#7312) with custom positioning (#7462)
* Cluster name support in project destinations (#7198)
* Around 30 more features and a total of 84 bug fixes
## v2.1.7 (2021-12-14)
- fix: issue with keepalive (#7861)
- fix nil pointer dereference error (#7905)
- fix: env vars to tune cluster cache were broken (#7779)
- fix: upgraded gitops engine to v0.4.2 (fixes #7561)
## v2.1.6 (2021-11-16)
- fix: don't use revision caching during app creation (#7508)
- fix: supporting OCI dependencies. Fixes #6062 (#6994)
## v2.1.5 (2021-11-16)
- fix: Invalid memory address or nil pointer dereference in processRequestedAppOperation (#7501)
## v2.1.4 (2021-11-15)
- fix: Operation has completed with phase: Running (#7482)
- fix: Application status panel shows Syncing instead of Deleting (#7486)
- fix(ui): Add Error Boundary around Extensions and comply with new Extensions API (#7215)
## v2.1.3 (2021-10-29)
- fix: core-install.yaml always refers to latest argocd image (#7321)
- fix: handle applicationset backup forbidden error (#7306)
- fix: Argo CD should not use cached git/helm revision during app creation/update validation (#7244)
## v2.1.2 (2021-10-02)
- fix: cluster filter popping out of box (#7135)
- fix: gracefully shutdown metrics server when dex config changes (#7138)
- fix: upgrade gitops engine version to v0.4.1 (#7088)
- fix: repository name already exists when multiple helm dependencies (#7096)
## v2.1.1 (2021-08-25)
### Bug Fixes
- fix: password reset requirements (#7071)
- fix: Custom Styles feature is broken (#7067)
- fix(ui): Add State to props passed to Extensions (#7045)
- fix: keep uid_entrypoint.sh for backward compatibility (#7047)
## v2.1.0 (2021-08-20)
> [Upgrade instructions](./docs/operator-manual/upgrading/2.0-2.1.md)
### Argo CD Core
Argo CD Core is a lightweight Argo CD distribution that packages only the core GitOps features and relies
on the Kubernetes API and RBAC to power the UI and CLI.
### Core Features
* The synchronization process became much faster and requires significantly less memory.
* Additional caching ensures that each repository's target revisions are queried only once per
reconciliation cycle. This dramatically reduces the number of Git requests.
* Improved Diffing Customizations: use JQ path expressions to exclude specific fields from the diff (see the sketch after this list).
* Health assessment support for new CRDs: introduced health assessment of CRDs from the trident.netapp.io,
elasticsearch.k8s.elastic.co, cluster.x-k8s.io, and minio.min.io API groups.
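A minimal sketch of the JQ-based diffing customization mentioned above. The layout follows the Application CRD's `spec.ignoreDifferences` block; the application, repository, target group/kind, and JQ expression are placeholders.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app               # placeholder name
  namespace: argocd               # assumed default installation namespace
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/example-manifests.git   # placeholder repo
    path: apps/example
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: example
  ignoreDifferences:
    - group: apps
      kind: Deployment
      jqPathExpressions:
        # ignore the image of one specific container when diffing
        - '.spec.template.spec.containers[] | select(.name == "example-sidecar").image'
```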
### Improved Settings
A set of changes has been implemented to simplify configuring Argo CD.
* Simplified Repository Registration: you no longer need to modify the argocd-cm ConfigMap to register a
new Git or Helm repository (see the sketch after this list).
* Enhanced Resource Customizations: the resource.customizations key has been deprecated in favor of
a separate ConfigMap key per resource.
* Reference secret values from any Kubernetes secret: starting with v2.1 you can use sensitive data stored in
any Kubernetes secret to configure Argo CD.
* Simplified parametrization of Argo CD server processes: an additional optional ConfigMap, argocd-cmd-params-cm,
has been introduced.
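As a concrete illustration of the first bullet, a minimal sketch of declarative repository registration via a labeled Secret. The repository URL and credentials are placeholders, and the field layout follows the pattern documented for Argo CD 2.1, so treat it as a sketch rather than a definitive reference.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: private-repo              # arbitrary name
  namespace: argocd               # assumed default installation namespace
  labels:
    # this label marks the Secret as a repository definition for Argo CD
    argocd.argoproj.io/secret-type: repository
stringData:
  type: git
  url: https://github.com/example-org/example-repo.git   # placeholder URL
  username: example-user                                  # placeholder credentials
  password: example-token
```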
### Refreshed User Interface
* Enhanced and more consistent filters on Applications List and Applications Details pages.
* Status bar on the Application List page.
* The redesigned search box on the Application List page and more.
### The argocd-util CLI deprecation
The argocd-util commands are now available under the argocd admin subcommand.
## v2.0.5 (2021-07-22)
* fix: allow argocd-notification ingress to repo-server (#6746)
* fix: argocd-server crashes due to nil pointer dereference (#6757)
* fix: WebUI failure when loading pod view 't.parentRefs is undefined' (#6490) (#6535)
* fix: prevent 'cannot read property "filter" of undefined' during nodes filtering (#6453)
* fix: download Pod Logs button not honouring argocd-server rootpath (#6548) (#6627)
* fix: Version warning banner in docs (#6682)
* fix: upgrade gitops engine to fix workflow health check
## v2.0.4 (2021-06-22)
* fix: typo in networkPolicy definition in manifests (#6532)
* fix: Update redis to 6.2.4 (#6475)
* fix: allows access to dex metrics from any pod (#6420)
* fix: add client side retry to prevent 'transport is closing' errors (#6402)
* fix: Update documentation Argocd app CRD health with app of apps (#6281)
* fix(ui): Crash on application pod view (#6384)
* chore: pin mkdocs version to fix docs build (#6421)
* chore: regenerate manifests using codegen (#6422)
* refactor: use RLock and RUnlock for project to improve performance (#6225)
* chore: Update Golang to v1.16.4 (#6358)
## v2.0.3 (2021-05-27)
### Bug Fixes
* fix: add missing --container flag to 'argocd app logs' command (#6320)
* fix: grpc web proxy must ensure to read full header (#6319)
* fix: controller should refresh app before running sync operation (#6294)
## v2.0.2 (2021-05-20)
### Bug Fixes
* fix: enable access to metrics port in embedded network policies (#6277)
* fix: display log streaming error in logs viewer (#6100) (#6273)
* fix: Don't count errored or completed neighbor pods toward resource consumption (#6259)
* fix: Enable kex algo diffie-hellman-group-exchange-sha256 for go-git ssh (#6256)
* fix: copy github app key from repocreds (#6140, #6197)
* fix(ui): UI crashes after reinstalling ArgoCD (#6218)
* fix: add network policies to restrict traffic flow between argocd components (#6156)
* fix: Revert "feat: Add health checks for kubernetes-external-secrets (#5435)"
* chore: Allow ingress traffic to argocd-server by default (#6179)
## v2.0.1 (2021-04-15)
### Bug Fixes
* fix: spark application check fails on missing section (#6036)
* fix: Adding explicit bind to redis and sentinel for IPv4 clusters #5957 (#6005)
* fix: fix: use correct field for evaluating whether or not GitHub Enterprise is selected (#5987)
## v2.0.0 (2021-04-07)
> [Upgrade instructions](./docs/operator-manual/upgrading/1.8-2.0.md)
### Pods View
The Pods View is particularly useful for applications that have hundreds of pods. Instead of visualizing all Kubernetes
resources for the application, it only shows Kubernetes pods and closely related resources. The Pods View supports
grouping related resources by Parent Resource, Top Level Parent, or Node. Each way of grouping solves a particular
use case. For example, grouping by Top Level Parent allows you to quickly find how many pods your application is running
and which resources created them. Grouping by Node allows you to see how pods are spread across the nodes and how many
resources they requested.
### Logs Viewer
Argo CD provides a way to see live logs of pods, which is very useful for debugging and troubleshooting. In the v2.0
release, the log visualization has been rewritten to support pagination, filtering, the ability to disable/enable log
streaming, and even a dark mode for terminal lovers. Do you want to see aggregated logs of multiple deployment pods?
Not a problem! Just click on the parent resource such as Deployment, ReplicaSet, or StatefulSet and navigate
to the Logs tab.
### Banner Feature
Want to notify your Argo CD users of upcoming changes? Just specify the notification message and optional URL using the
`ui.bannercontent` and `ui.bannerurl` attributes in the `argocd-cm` ConfigMap.
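For example, a minimal `argocd-cm` snippet using the two attributes named above; the message and URL values are placeholders.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd   # assumed default installation namespace
data:
  ui.bannercontent: "Argo CD will be upgraded on Saturday night"   # placeholder message
  ui.bannerurl: "https://example.com/maintenance-window"           # optional placeholder link
```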
### Core Features
* The new sync option `PrunePropagationPolicy=background` allows using background deletion during syncing (see the example after this list).
* The new application finalizer `resources-finalizer.argocd.argoproj.io:background` allows using background deletion when the application is deleted.
* The new sync option `ApplyOutOfSyncOnly=true` allows skipping syncing resources that are already in the desired state.
* The new sync option `PruneLast=true` allows deferring resource pruning until the last synchronization phase, after all other resources are synced and healthy.
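A minimal sketch of an Application that combines the options listed above. The option names and the finalizer come from the notes; the application name, repository, and destination are placeholders, and whether you want all of these options together depends on your own sync strategy.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app                    # placeholder name
  namespace: argocd                    # assumed default installation namespace
  finalizers:
    # enables background (cascading) deletion when the Application is deleted
    - resources-finalizer.argocd.argoproj.io:background
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/example-manifests.git   # placeholder repo
    targetRevision: HEAD
    path: manifests
  destination:
    server: https://kubernetes.default.svc
    namespace: example                 # placeholder target namespace
  syncPolicy:
    syncOptions:
      - PrunePropagationPolicy=background
      - ApplyOutOfSyncOnly=true
      - PruneLast=true
```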
### The argocd-util CLI
Argo CD Util is a CLI tool that contains useful commands for operators who manage Argo CD. Starting from this release,
the argocd-util CLI is published with every Argo CD release and is available via Homebrew.
## v1.8.7 (2021-02-26)
### Important note
This release fixes a regression regarding which cluster resources are permitted at the AppProject level.
Prior to this fix, after #3960 had been merged, all cluster resources were allowed at the project level when neither
the allow nor the deny list was defined. However, the correct behavior is to block all resources in this case.
If you have Projects with empty allow and deny lists, but want the associated applications to be able to sync cluster
resources, you will have to adapt your cluster resource allow lists to explicitly allow those resources.
- fix: redact sensitive data in logs (#5662)
- fix: Properly escape HTML for error message from CLI SSO (#5563)
- fix: Empty resource whitelist allowed all resources (#5540) (#5551)
## v1.8.6 (2021-02-26)
- fix: Properly escape HTML for error message from CLI SSO (#5563)
- fix: API server should not print resource body when resource update fails (#5617)
- fix: fix memory leak in application controller (#5604)
## v1.8.5 (2021-02-19)
- fix: 'argocd app wait --suspended' stuck if operation is in progress (#5511)
- fix: Presync hooks stop working after namespace resource is added in a Helm chart #5522
- docs: add the missing rbac resources to the documentation (#5476)
- refactor: optimize argocd-application-controller redis usage (#5345)
## v1.8.4 (2021-02-05)
- feat: set X-XSS-Protection while serving static content (#5412)
- fix: version info should be available if anonymous access is enabled (#5422)
- fix: disable jwt claim audience validation #5381 (#5413)
- fix: /api/version should not return tools version for unauthenticated requests (#5415)
- fix: account tokens should be rejected if required capability is disabled (#5414)
- fix: tokens keep working after account is deactivated (#5402)
- fix: a request which was using a revoked project token, would still be allowed to perform requests allowed by default policy (#5378)
## v1.8.3 (2021-01-21)
- fix: make sure JWT token time fields contain only integer values (#5228)
## v1.8.2 (2021-01-09)
### Bug Fixes
- fix: updating cluster drops secret (#5220)
- fix: remove invalid assumption about OCI helm chart path (#5179)
- fix: Possible nil pointer dereference in repository API (#5128)
- fix: Possible nil pointer dereference in repocreds API (#5130)
- fix: use json serialization to store cache instead of github.com/vmihailenco/msgpack (#4965)
- fix: add liveness probe to restart repo server if it fails to server tls requests (#5110) (#5119)
- fix: Allow correct SSO redirect URL for CLI static client (#5098)
- fix: add grpc health check (#5060)
- fix: setting 'revision history limit' errors in UI (#5035)
- fix: add api-server liveness probe that catches bad data in informer (#5026)
### Refactoring
- chore: Update Dex to v2.27.0 (#5058)
- chore: Upgrade gorilla/handlers and gorilla/websocket (#5186)
- chore: Upgrade jwt-go to 4.0.0-preview1 (#5184)
## v1.8.1 (2020-12-09)
- fix: sync retry is broken for multi-phase syncs (#5017)
## v1.8.0 (2020-12-09)
### Mono-Repository Improvements
Enhanced performance during manifest generation from mono-repository - the repository that represents the
desired state of the whole cluster and contains hundreds of applications. The improved argocd-repo-server
now able to concurrently generate manifests from the same repository and for the same commit SHA. This
might provide 10x performance improvement of manifests generation.
### Annotation Based Path Detection
This feature allows specifying which source repository directories influence application manifest generation
using the `argocd.argoproj.io/manifest-generate-paths` annotation. The annotation improves the Git webhook handler
behavior: the webhook avoids reconciling related applications if no related files have been changed by the Git commit,
and it even allows skipping manifest generation for a new commit by re-using the manifests generated for the previous commit.
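A minimal sketch of the annotation on an Application: the annotation name comes from the notes above, while the path value, application name, and source details are placeholders.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app                                   # placeholder name
  namespace: argocd                                   # assumed default installation namespace
  annotations:
    # webhook events that change no files under this path (relative to the
    # application's source path) skip refresh and manifest generation
    argocd.argoproj.io/manifest-generate-paths: .
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/example-manifests.git  # placeholder repo
    path: apps/example                                # placeholder path
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: example
```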
### Horizontal Controller Scaling
This release allows scaling the `argocd-application-controller` horizontally. This allows you to manage as many Kubernetes clusters
as needed using a single Argo CD instance.
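A minimal sketch of what this can look like, assuming the `ARGOCD_CONTROLLER_REPLICAS` environment variable on the `argocd-application-controller` StatefulSet described in the high-availability documentation; the variable name and the patch shape are assumptions, not something stated in these notes.

```yaml
# sketch: strategic-merge patch for the argocd-application-controller StatefulSet
spec:
  replicas: 2                     # run two controller shards
  template:
    spec:
      containers:
        - name: argocd-application-controller
          env:
            # assumption: should match .spec.replicas so every replica knows how
            # many shards the managed clusters are distributed across
            - name: ARGOCD_CONTROLLER_REPLICAS
              value: "2"
```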
### New Core Functionality Features
Besides performance improvements, Argo CD got a lot of usability enhancements and new features:
* Namespace and CRD creation [#4354](https://github.com/argoproj/argo-cd/issues/4354)
* Unknown fields of built-in K8S types [#1787](https://github.com/argoproj/argo-cd/issues/1787)
* Endpoints Diffing [#1816](https://github.com/argoproj/argo-cd/issues/1816)
* Better compatibility with Helm Hooks [#1816](https://github.com/argoproj/argo-cd/issues/1816)
* App-of-Apps Health Assessment [#3781](https://github.com/argoproj/argo-cd/issues/3781)
### Global Projects
This release makes it easy to manage an Argo CD instance that has hundreds of Projects. Instead of duplicating the same organization-wide rules in all projects,
you can put such rules into one project and make this project “global” for all other projects. Rules defined in the global project are inherited by all
other projects and therefore don't have to be duplicated. The sample below demonstrates how you can create a global project and specify which projects should
inherit the global project's rules using Kubernetes labels.
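The original notes refer to a sample that is not reproduced here; the following is a minimal sketch of what such a configuration can look like, assuming the `globalProjects` setting in the `argocd-cm` ConfigMap with a label selector and a global project name. The key names, label key, and project name are assumptions based on the Argo CD documentation rather than on these notes.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd                       # assumed default installation namespace
data:
  # assumption: projects whose labels match the selector inherit the rules
  # defined in the named "global" project
  globalProjects: |-
    - labelSelector:
        matchExpressions:
          - key: opt.in.to.global.project   # placeholder label key
            operator: Exists
      projectName: global-project           # placeholder global project name
```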
### User Interface Improvements
The Argo CD user interface is an important part of a project and we keep working hard on improving the user experience. Here is an incomplete list of implemented improvements:
* Improved Applications Filters [#4622](https://github.com/argoproj/argo-cd/issues/4622)
* Git tags and branches autocompletion [#4713](https://github.com/argoproj/argo-cd/issues/4713)
* Project Details Page [#4400](https://github.com/argoproj/argo-cd/issues/4400)
* New version information panel [#4376](https://github.com/argoproj/argo-cd/issues/4376)
* Progress Indicators [#4411](https://github.com/argoproj/argo-cd/issues/4411)
* External links annotations [#4380](https://github.com/argoproj/argo-cd/issues/4380) and more!
### Config Management Tools Enhancements
* OCI Based Repositories [#4018](https://github.com/argoproj/argo-cd/issues/4018)
* Configurable Helm Versions [#4111](https://github.com/argoproj/argo-cd/issues/4111)
### Bug fixes and under the hood changes
In addition to new features and enhancements, we've fixed more than 50 bugs and upgraded third-party components and libraries that Argo CD relies on.
## v1.7.9 (2020-11-17)
- fix: improve commit verification tolerance (#4825)
- fix: argocd diff --local should not print data of local secrets (#4850)
- fix(ui): stack overflow crash of resource tree view for large applications (#4685)
- chore: Update golang to v1.14.12 [backport to release-1.7] (#4834)
- chore: Update redis to 5.0.10 (#4767)
- chore: Replace deprecated GH actions directives for integration tests (#4589)
## v1.7.8 (2020-10-15)
- fix(logging.go): changing marshaler for JSON logging to use gogo (#4319)
- fix: login with apiKey capability (#4557)
- fix: api-server should not try creating default project it is exists already (#4517)
- fix: JS error on application list page if app has no namespace (#4499)
## v1.7.7 (2020-09-28)
- fix: Support transition from a git managed namespace to auto create (#4401)
- fix: reduce memory spikes during cluster cache refresh (#4298)
- fix: No error/warning condition if application destination namespace not monitored by Argo CD (#4329)
- fix: Fix local diff/sync of apps using cluster name (#4201)
## v1.7.6 (2020-09-18)
- fix: Added cluster authentication to AKS clusters (#4265)
- fix: swagger UI stuck loading (#4377)
- fix: prevent 'argocd app sync' hangs if sync is completed too quickly (#4373)
- fix: argocd app wait/sync might stuck (#4350)
- fix: failed syncs are not retried soon enough (#4353)
## v1.7.5 (2020-09-15)
- fix: app create with -f should not ignore other options (#4322)
- fix: limit concurrent list requests across all clusters (#4328)
- fix: fix possible deadlock in /v1/api/stream/applications and /v1/api/application APIs (#4315)
- fix: WatchResourceTree does not enforce RBAC (#4311)
- fix: app refresh API should use app resource version (#4303)
- fix: use informer instead of k8s watch to ensure app is refreshed (#4290)
## v1.7.4 (2020-09-04)
- fix: automatically stop watch API requests when page is hidden (#4269)
- fix: upgrade gitops-engine dependency (issues #4242, #1881) (#4268)
- fix: application stream API should not return 'ADDED' events if resource version is provided (#4260)
- fix: return parsing error (#3942)
- fix: JS error when using cluster filter in the /application view (#4247)
- fix: improve applications list page client side performance (#4244)
## v1.7.3 (2020-09-01)
- fix: application details page crash when app is deleted (#4229)
- fix: api-server unnecessary normalize projects on every start (#4219)
- fix: load only project names in UI (#4217)
- fix: Re-create already initialized ARGOCD_GNUPGHOME on startup (#4214) (#4223)
- fix: Add openshift as a dex connector type which requires a redirectURI (#4222)
- fix: Replace status.observedAt with redis pub/sub channels for resource tree updates (#1340) (#4208)
- fix: cache inconsistency of child resources (#4053) (#4202)
- fix: do not include kube-api check in application liveness flow (#4163)
## v1.7.2 (2020-08-27)
- fix: Sync hangs with cert-manager on latest RC (#4105)
- fix: support for PKCE for cli login (#2932)
## v1.7.1 (2020-08-25)
- fix: Unable to create project JWT token on K8S v1.15 (#4165)
- fix: Argo CD does not exclude creationTimestamp from diffing (#4157)
## v1.7.0 (2020-08-24)
## v1.7.0 (Unreleased)
### GnuPG Signature Verification
@@ -711,7 +97,7 @@ use cases, such as bootstrapping a Kubernetes cluster, or decentralized manageme
#### Other
- refactoring: GitOps engine (#3066)
- refactoring: Gitops engine (#3066)
## v1.5.8 (2020-06-16)
@@ -774,7 +160,7 @@ customizations, custom resource health checks, and more.
### Other
* New Project and Application CRD settings ([#2900](https://github.com/argoproj/argo-cd/issues/2900), [#2873](https://github.com/argoproj/argo-cd/issues/2873)) that allows customizing Argo CD behavior.
* Upgraded Dex (v2.22.0) enables seamless [SSO integration](https://www.openshift.com/blog/openshift-authentication-integration-with-argocd) with OpenShift.
* Upgraded Dex (v2.22.0) enables seamless [SSO integration](https://www.openshift.com/blog/openshift-authentication-integration-with-argocd) with Openshift.
#### Enhancements
@@ -806,7 +192,7 @@ customizations, custom resource health checks, and more.
* fix for helm repo add with flag --insecure-skip-server-verification (#3420)
* fix: app diff --local support for helm repo. #3151 (#3407)
* fix: Syncing apps incorrectly states "app synced", but this is not true (#3286)
* fix: for jsonnet when it is located in nested subdirectory and uses import (#3372)
* fix: for jsonnet when it is localed in nested subdirectory and uses import (#3372)
* fix: Update 4.5.3 redis-ha helm manifest (#3370)
* fix: return 401 error code if username does not exist (#3369)
* fix: Do not panic while running hooks with short revision (#3368)
@@ -922,7 +308,7 @@ Last-minute bugs that will be addressed in 1.5.1 shortly:
- fix: argocd-util backup produced truncated backups. import app status (#3096)
- fix: upgrade redis-ha chart and enable haproxy (#3147)
- fix: make dex server deployment init container resilient to restarts (#3136)
- fix: redact secret values of manifests stored in git (#3088)
- fix: reduct secret values of manifests stored in git (#3088)
- fix: labels not being deleted via UI (#3081)
- fix: HTTP|HTTPS|NO_PROXY env variable reading #3055 (#3063)
- fix: Correct usage text for repo add command regarding insecure repos (#3068)
@@ -1029,10 +415,10 @@ More documentation and tools are coming in patch releases.
Argo CD deletes all **in-flight** hooks if you terminate a running sync operation. The hook state assessment change implemented in this release enables Argo CD to detect
an in-flight state for all Kubernetes resources including `Deployment`, `PVC`, `StatefulSet`, `ReplicaSet`, etc. So if you terminate a sync operation that has, for example,
a `StatefulSet` hook that is `Progressing`, it will be deleted. Long-running jobs are not supposed to be used as a sync hook and you should consider using
[Sync Waves](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-waves/) instead.
[Sync Waves](https://argoproj.github.io/argo-cd/user-guide/sync-waves/) instead.
#### Enhancements
* feat: Add custom health checks for cert-manager v0.11.0 (#2689)
* feat: Add custom healthchecks for cert-manager v0.11.0 (#2689)
* feat: add git submodule support (#2495)
* feat: Add repository credential management API and CLI (addresses #2136) (#2207)
* feat: add support for --additional-headers cli flag (#2467)
@@ -1217,7 +603,7 @@ There may be instances when you want to control the times during which an Argo C
#### Bug Fixes
- failed parsing on parameters with comma (#1660)
- StatefulSet with OnDelete Update Strategy stuck progressing (#1881)
- Statefulset with OnDelete Update Strategy stuck progressing (#1881)
- Warning during secret diffing (#1923)
- Error message "Unable to load data: key is missing" is confusing (#1944)
- OIDC group bindings are truncated (#2006)
@@ -1299,7 +685,7 @@ There may be instances when you want to control the times during which an Argo C
## v1.2.3 (2019-10-1)
* Make argo-cd docker images openshift friendly (#2362) (@duboisf)
* Add dest-server and dest-namespace field to reconciliation logs (#2354)
- Stop logging /repository.RepositoryService/ValidateAccess parameters (#2386)
- Stop loggin /repository.RepositoryService/ValidateAccess parameters (#2386)
## v1.2.2 (2019-09-26)
+ Resource action equivalent to `kubectl rollout restart` (#2177)
@@ -1384,7 +770,7 @@ Support for Git LFS enabled repositories - now you can store Helm charts as tar
- Wait for CRD creation during sync process (#1940)
- Added a button to select out of sync items in the sync panel (#1902)
- Proper handling of an excluded resource in an application (#1621)
- Stop repeating logs on stopped container (#1614)
- Stop repeating logs on stoped container (#1614)
- Fix git repo url parsing on application list view (#2174)
- Fix nil pointer dereference error during app reconciliation (#2146)
- Fix history api fallback implementation to support app names with dots (#2114)
@@ -1440,7 +826,7 @@ optimized which significantly reduced the number of Git requests. With v1.1 rele
#### User Defined Application Metadata
User-defined Application metadata enables the user to define a list of useful URLs for their specific application and expose those links on the UI
(e.g. reference to a CI pipeline or an application-specific management tool). These links should provide helpful shortcuts that make easier to integrate Argo CD into existing
(e.g. reference tp a CI pipeline or an application-specific management tool). These links should provide helpful shortcuts that make easier to integrate Argo CD into existing
systems by making it easier to find other components inside and outside Argo CD.
### Deprecation Notice
@@ -1804,7 +1190,7 @@ has a minimum client version of v0.12.0. Older CLI clients will be rejected.
* Deprecate componentParameterOverrides in favor of source specific config (#1207)
* Support talking to Dex using local cluster address instead of public address (#1211)
* Use Recreate deployment strategy for controller (#1315)
* Honor OS environment variables for helm commands (#1306) (@1337andre)
* Honor os environment variables for helm commands (#1306) (@1337andre)
* Disable CGO_ENABLED for server/controller binaries (#1286)
* Documentation fixes and improvements (@twz123, @yann-soubeyrand, @OmerKahani, @dulltz)
- Fix CRD creation/deletion handling (#1249)
@@ -1894,7 +1280,7 @@ running Dex (e.g. Okta, OneLogin, Auth0, Microsoft, etc...)
The optional, [Dex IDP OIDC provider](https://github.com/dexidp/dex) is still bundled as part of the
default installation, in order to provide a seamless out-of-box experience, enabling Argo CD to
integrate with non-OIDC providers, and to benefit from Dex's full range of
[connectors](https://dexidp.io/docs/connectors/).
[connectors](https://github.com/dexidp/dex/tree/master/Documentation/connectors).
#### OIDC group bindings to Project Roles
OIDC group claims from an OAuth2 provider can now be bound to Argo CD project roles. Previously,
@@ -2296,8 +1682,8 @@ RBAC policy rules, need to be rewritten to include one extra column with the eff
+ Override parameters
## v0.1.0 (2018-03-12)
+ Define app in GitHub with dev and preprod environment using KSonnet
+ Define app in Github with dev and preprod environment using KSonnet
+ Add cluster Diff App with a cluster Deploy app in a cluster
+ Deploy a new version of the app in the cluster
+ App sync based on GitHub app config change - polling only
+ App sync based on Github app config change - polling only
+ Basic UI: App diff between Git and k8s cluster for all environments Basic GUI

Dockerfile
@@ -1,77 +1,84 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
ARG BASE_IMAGE=debian:10-slim
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.18 AS builder
FROM golang:1.14.1 as builder
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN apt-get update && apt-get install --no-install-recommends -y \
RUN apt-get update && apt-get install -y \
openssh-server \
nginx \
unzip \
fcgiwrap \
git \
git-lfs \
make \
wget \
gcc \
sudo \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
WORKDIR /tmp
COPY hack/install.sh hack/tool-versions.sh ./
COPY hack/installers installers
ADD hack/install.sh .
ADD hack/installers installers
ADD hack/tool-versions.sh .
RUN ./install.sh helm-linux && \
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
RUN ./install.sh packr-linux
RUN ./install.sh kubectl-linux
RUN ./install.sh ksonnet-linux
RUN ./install.sh helm2-linux
RUN ./install.sh helm-linux
RUN ./install.sh kustomize-linux
####################################################################################################
# Argo CD Base - used as the base for both the release and dev argocd images
####################################################################################################
FROM $BASE_IMAGE AS argocd-base
FROM $BASE_IMAGE as argocd-base
USER root
ENV DEBIAN_FRONTEND=noninteractive
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN groupadd -g 999 argocd && \
useradd -r -u 999 -g argocd argocd && \
mkdir -p /home/argocd && \
chown argocd:0 /home/argocd && \
chmod g=u /home/argocd && \
chmod g=u /etc/passwd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y \
git git-lfs tini gpg tzdata && \
apt-get install -y git git-lfs python3-pip tini gpg && \
apt-get clean && \
pip3 install awscli==1.18.80 && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh
COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
# keep uid_entrypoint.sh for backward compatibility
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
# script to add current (possibly arbitrary) user to /etc/passwd at runtime
# (if it's not already there, to be openshift friendly)
COPY uid_entrypoint.sh /usr/local/bin/uid_entrypoint.sh
# support for mounting configuration from a configmap
WORKDIR /app/config/ssh
RUN touch ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
RUN mkdir -p /app/config/ssh && \
touch /app/config/ssh/ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
WORKDIR /app/config
RUN mkdir -p tls && \
mkdir -p gpg/source && \
mkdir -p gpg/keys && \
chown argocd gpg/keys && \
chmod 0700 gpg/keys
RUN mkdir -p /app/config/tls
RUN mkdir -p /app/config/gpg/source && \
mkdir -p /app/config/gpg/keys && \
chown argocd /app/config/gpg/keys && \
chmod 0700 /app/config/gpg/keys
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
USER 999
@@ -80,52 +87,46 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
FROM node:11.15.0 as argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
ADD ["ui/package.json", "ui/yarn.lock", "./"]
RUN yarn install --network-timeout 200000 && \
yarn cache clean
RUN yarn install
COPY ["ui/", "."]
ADD ["ui/", "."]
ARG ARGO_VERSION=latest
ENV ARGO_VERSION=$ARGO_VERSION
ARG TARGETARCH
RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTIONS=--max_old_space_size=8192 yarn build
RUN NODE_ENV='production' yarn build
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.18 AS argocd-build
FROM golang:1.14.1 as argocd-build
COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY go.* ./
COPY go.mod go.mod
COPY go.sum go.sum
RUN go mod download
# Perform the build
COPY . .
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
ARG TARGETOS
ARG TARGETARCH
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make argocd-all
RUN make cli-local server controller repo-server argocd-util
ARG BUILD_ALL_CLIS=true
RUN if [ "$BUILD_ALL_CLIS" = "true" ] ; then \
make CLI_NAME=argocd-darwin-amd64 GOOS=darwin cli-local && \
make CLI_NAME=argocd-windows-amd64.exe GOOS=windows cli-local \
; fi
####################################################################################################
# Final image
####################################################################################################
FROM argocd-base
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
USER 999
COPY --from=argocd-ui ./src/dist/app /shared/app

Dockerfile.dev
@@ -2,14 +2,5 @@
# argocd-dev
####################################################################################################
FROM argocd-base
COPY argocd /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller
USER 999
COPY argocd* /usr/local/bin/
COPY --from=argocd-ui ./src/dist/app /shared/app

Makefile
@@ -1,10 +1,7 @@
PACKAGE=github.com/argoproj/argo-cd/v2/common
PACKAGE=github.com/argoproj/argo-cd/common
CURRENT_DIR=$(shell pwd)
DIST_DIR=${CURRENT_DIR}/dist
CLI_NAME=argocd
BIN_NAME=argocd
GEN_RESOURCES_CLI_NAME=argocd-resources-gen
HOST_OS:=$(shell go env GOOS)
HOST_ARCH:=$(shell go env GOARCH)
@@ -14,8 +11,8 @@ BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
GIT_COMMIT=$(shell git rev-parse HEAD)
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run github.com/gobuffalo/packr/packr"; fi)
VOLUME_MOUNT=$(shell if test "$(go env GOOS)" = "darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":delegated"; else echo ""; fi)
KUBECTL_VERSION=$(shell go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)
GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi)
GOCACHE?=$(HOME)/.cache/go-build
@@ -25,11 +22,6 @@ DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
ARGOCD_PROCFILE?=Procfile
# Strict mode has been disabled in latest versions of mkdocs-material.
# Thus pointing to the older image of mkdocs-material matching the version used by argo-cd.
MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1
MKDOCS_RUN_ARGS?=
# Configuration for building argocd-test-tools image
TEST_TOOLS_NAMESPACE?=
TEST_TOOLS_IMAGE=argocd-test-tools
@@ -45,31 +37,17 @@ ARGOCD_E2E_REPOSERVER_PORT?=8081
ARGOCD_E2E_REDIS_PORT?=6379
ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=
ARGOCD_E2E_TEST_TIMEOUT?=30m
ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
ARGOCD_BIN_MODE?=true
ARGOCD_LINT_GOGC?=20
# Depending on where we are (legacy or non-legacy pwd), we need to use
# different Docker volume mounts for our source tree
LEGACY_PATH=$(GOPATH)/src/github.com/argoproj/argo-cd
ifeq ("$(PWD)","$(LEGACY_PATH)")
DOCKER_SRC_MOUNT="$(DOCKER_SRCDIR):/go/src$(VOLUME_MOUNT)"
else
DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)"
endif
# Runs any command in the argocd-test-utils container in server mode
# Server mode container will start with uid 0 and drop privileges during runtime
define run-in-test-server
docker run --rm -it \
--name argocd-test-server \
-u $(shell id -u):$(shell id -g) \
-e USER_ID=$(shell id -u) \
-e HOME=/home/user \
-e GOPATH=/go \
@@ -77,11 +55,7 @@ define run-in-test-server
-e ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
-e ARGOCD_E2E_TEST=$(ARGOCD_E2E_TEST) \
-e ARGOCD_E2E_YARN_HOST=$(ARGOCD_E2E_YARN_HOST) \
-e ARGOCD_E2E_DISABLE_AUTH=$(ARGOCD_E2E_DISABLE_AUTH) \
-e ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} \
-e ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} \
-e ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} \
-v ${DOCKER_SRC_MOUNT} \
-v ${DOCKER_SRCDIR}:/go/src${VOLUME_MOUNT} \
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
@@ -89,7 +63,6 @@ define run-in-test-server
-w ${DOCKER_WORKDIR} \
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
-p 4000:4000 \
-p 5000:5000 \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
endef
@@ -98,13 +71,13 @@ endef
define run-in-test-client
docker run --rm -it \
--name argocd-test-client \
-u $(shell id -u):$(shell id -g) \
-u $(shell id -u) \
-e HOME=/home/user \
-e GOPATH=/go \
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
-e GOCACHE=/tmp/go-build-cache \
-e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
-v ${DOCKER_SRC_MOUNT} \
-v ${DOCKER_SRCDIR}:/go/src${VOLUME_MOUNT} \
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
@@ -114,9 +87,9 @@ define run-in-test-client
bash -c "$(1)"
endef
#
#
define exec-in-test-server
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
docker exec -it -u $(shell id -u) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
endef
PATH:=$(PATH):$(PWD)/hack
@@ -135,8 +108,7 @@ override LDFLAGS += \
-X ${PACKAGE}.version=${VERSION} \
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}
ifeq (${STATIC_BUILD}, true)
override LDFLAGS += -extldflags "-static"
@@ -160,56 +132,30 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/
endif
.PHONY: all
all: cli image
# We have some legacy requirements for being checked out within $GOPATH.
# The ensure-gopath target can be used as dependency to ensure we are running
# within these boundaries.
.PHONY: ensure-gopath
ensure-gopath:
ifneq ("$(PWD)","$(LEGACY_PATH)")
@echo "Due to legacy requirements for codegen, repository needs to be checked out within \$$GOPATH"
@echo "Location of this repo should be '$(LEGACY_PATH)' but is '$(PWD)'"
@exit 1
endif
all: cli image argocd-util
.PHONY: gogen
gogen: ensure-gopath
gogen:
export GO111MODULE=off
go generate ./util/argo/...
.PHONY: protogen
protogen: ensure-gopath mod-vendor-local
protogen:
export GO111MODULE=off
./hack/generate-proto.sh
.PHONY: openapigen
openapigen: ensure-gopath
openapigen:
export GO111MODULE=off
./hack/update-openapi.sh
.PHONY: notification-catalog
notification-catalog:
go run ./hack/gen-catalog catalog
.PHONY: notification-docs
notification-docs:
go run ./hack/gen-docs
go run ./hack/gen-catalog docs
.PHONY: clientgen
clientgen: ensure-gopath
clientgen:
export GO111MODULE=off
./hack/update-codegen.sh
.PHONY: clidocsgen
clidocsgen: ensure-gopath
go run tools/cmd-docs/main.go
.PHONY: codegen-local
codegen-local: ensure-gopath mod-vendor-local notification-docs notification-catalog gogen protogen clientgen openapigen clidocsgen manifests-local
codegen-local: mod-vendor-local gogen protogen clientgen openapigen manifests-local
rm -rf vendor/
.PHONY: codegen
@@ -222,25 +168,32 @@ cli: test-tools-image
.PHONY: cli-local
cli-local: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
.PHONY: gen-resources-cli-local
gen-resources-cli-local: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
.PHONY: cli-docker
go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
.PHONY: release-cli
release-cli: clean-debug build-ui
make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all
make BIN_NAME=argocd-darwin-arm64 GOOS=darwin GOARCH=arm64 argocd-all
make BIN_NAME=argocd-linux-amd64 GOOS=linux argocd-all
make BIN_NAME=argocd-linux-arm64 GOOS=linux GOARCH=arm64 argocd-all
make BIN_NAME=argocd-linux-ppc64le GOOS=linux GOARCH=ppc64le argocd-all
make BIN_NAME=argocd-linux-s390x GOOS=linux GOARCH=s390x argocd-all
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all
release-cli: clean-debug image
docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)
docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64
docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64
docker cp tmp-argocd-linux:/usr/local/bin/argocd-windows-amd64.exe ${DIST_DIR}/argocd-windows-amd64.exe
docker rm tmp-argocd-linux
.PHONY: argocd-util
argocd-util: clean-debug
# Build argocd-util as a statically linked binary, so it could run within the alpine-based dex container (argoproj/argo-cd#844)
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
# .PHONY: dev-tools-image
# dev-tools-image:
# docker build -t $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE) . -f hack/Dockerfile.dev-tools
# docker tag $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE) $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE):$(DEV_TOOLS_VERSION)
.PHONY: test-tools-image
test-tools-image:
docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
docker build -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
.PHONY: manifests-local
@@ -251,28 +204,24 @@ manifests-local:
manifests: test-tools-image
$(call run-in-test-client,make manifests-local IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_TAG='${IMAGE_TAG}')
# consolidated binary for cli, util, server, repo-server, controller
.PHONY: argocd-all
argocd-all: clean-debug
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
# NOTE: we use packr to do the build instead of go, since we embed swagger files and policy.csv
# files into the go binary
.PHONY: server
server: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
.PHONY: repo-server
repo-server:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
.PHONY: controller
controller:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
.PHONY: build-ui
build-ui:
DOCKER_BUILDKIT=1 docker build -t argocd-ui --target argocd-ui .
find ./ui/dist -type f -not -name gitkeep -delete
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
.PHONY: packr
packr:
go build -o ${DIST_DIR}/packr github.com/gobuffalo/packr/packr/
.PHONY: image
ifeq ($(DEV_IMAGE), true)
@@ -280,25 +229,29 @@ ifeq ($(DEV_IMAGE), true)
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
# the dist directory is under .dockerignore.
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
image: build-ui
DOCKER_BUILDKIT=1 docker build -t argocd-base --target argocd-base .
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
image: packr
docker build -t argocd-base --target argocd-base .
docker build -t argocd-ui --target argocd-ui .
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd/argocd
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd/argocd
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-windows-amd64.exe ./cmd/argocd
cp Dockerfile.dev dist
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
else
image:
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
endif
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
.PHONY: armimage
# The "BUILD_ALL_CLIS" argument is to skip building the CLIs for darwin and windows
# which would take a really long time.
armimage:
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm . --build-arg BUILD_ALL_CLIS="false"
.PHONY: builder-image
builder-image:
@@ -311,7 +264,7 @@ mod-download: test-tools-image
.PHONY: mod-download-local
mod-download-local:
go mod download && go mod tidy # go mod download changes go.sum https://github.com/golang/go/issues/42970
go mod download
.PHONY: mod-vendor
mod-vendor: test-tools-image
@@ -355,7 +308,7 @@ build: test-tools-image
# Build all Go code (local version)
.PHONY: build-local
build-local:
build-local:
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
# Run all unit tests
@@ -376,24 +329,10 @@ test-local:
./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
.PHONY: test-race
test-race: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client,make TEST_MODULE=$(TEST_MODULE) test-race-local)
# Run all unit tests, with data race detection, skipping known failures (local version)
.PHONY: test-race-local
test-race-local:
if test "$(TEST_MODULE)" = ""; then \
./hack/test.sh -race -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
./hack/test.sh -race -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
# Run the E2E test suite. E2E test servers (see start-e2e target) must be
# started before.
.PHONY: test-e2e
test-e2e:
test-e2e:
$(call exec-in-test-server,make test-e2e-local)
# Run the E2E test suite (local version)
@@ -401,7 +340,7 @@ test-e2e:
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v ./test/e2e
ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout 20m -v ./test/e2e
# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image
@@ -420,32 +359,28 @@ start-e2e: test-tools-image
# Starts e2e server locally (or within a container)
.PHONY: start-e2e-local
start-e2e-local: mod-vendor-local dep-ui-local cli-local
start-e2e-local:
kubectl create ns argocd-e2e || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
# Create GPG keys and source directories
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
if test "$(USER_ID)" != ""; then chown -R "$(USER_ID)" /tmp/argo-e2e; fi
# set paths for locally managed ssh known hosts and tls certs data
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_GPG_ENABLED=true \
ARGOCD_E2E_DISABLE_AUTH=false \
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
goreman -f $(ARGOCD_PROCFILE) start
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in by golang embed
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
.PHONY: clean-debug
clean-debug:
-find ${CURRENT_DIR} -name debug.test | xargs rm -f
@@ -461,7 +396,7 @@ start: test-tools-image
# Starts a local instance of ArgoCD
.PHONY: start-local
start-local: mod-vendor-local dep-ui-local
start-local: mod-vendor-local
# check we can connect to Docker to start Redis
killall goreman || true
kubectl create ns argocd || true
@@ -471,16 +406,10 @@ start-local: mod-vendor-local dep-ui-local
mkdir -p /tmp/argocd-local/gpg/source
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=false \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_GPG_ENABLED=true \
ARGOCD_E2E_TEST=false \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
# Run goreman start with exclude option , provide exclude env variable with list of services
.PHONY: run
run:
bash ./hack/goreman-start.sh
# Runs pre-commit validation with the virtualized toolchain
.PHONY: pre-commit
pre-commit: codegen build lint test
@@ -498,22 +427,22 @@ release-precheck: manifests
.PHONY: release
release: pre-commit release-precheck image release-cli
.PHONY: build-docs-local
build-docs-local:
mkdocs build
.PHONY: build-docs
build-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} build
.PHONY: serve-docs-local
serve-docs-local:
mkdocs serve
mkdocs build
.PHONY: serve-docs
serve-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
mkdocs serve
.PHONY: lint-docs
lint-docs:
# https://github.com/dkhamsing/awesome_bot
find docs -name '*.md' -exec grep -l http {} + | xargs docker run --rm -v $(PWD):/mnt:ro dkhamsing/awesome_bot -t 3 --allow-dupe --allow-redirect --white-list `cat white-list | grep -v "#" | tr "\n" ','` --skip-save-results --
.PHONY: publish-docs
publish-docs: lint-docs
mkdocs gh-deploy
# Verify that kubectl can connect to your K8s cluster from Docker
.PHONY: verify-kube-connect
@@ -535,13 +464,17 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
# Installs all tools required for running unit & end-to-end tests (Linux packages)
.PHONY: install-test-tools-local
install-test-tools-local:
./hack/install.sh kustomize
./hack/install.sh helm-linux
sudo ./hack/install.sh packr-linux
sudo ./hack/install.sh kubectl-linux
sudo ./hack/install.sh kustomize-linux
sudo ./hack/install.sh ksonnet-linux
sudo ./hack/install.sh helm2-linux
sudo ./hack/install.sh helm-linux
# Installs all tools required for running codegen (Linux packages)
.PHONY: install-codegen-tools-local
install-codegen-tools-local:
./hack/install.sh codegen-tools
sudo ./hack/install.sh codegen-tools
# Installs all tools required for running codegen (Go packages)
.PHONY: install-go-tools-local
@@ -554,18 +487,3 @@ dep-ui: test-tools-image
dep-ui-local:
cd ui && yarn install
start-test-k8s:
go run ./hack/k8s
.PHONY: list
list:
@LC_ALL=C $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$'
.PHONY: applicationset-controller
applicationset-controller:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
.PHONY: checksums
checksums:
sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt

OWNERS
@@ -5,25 +5,8 @@ owners:
approvers:
- alexec
- alexmt
- dthomson25
- jannfis
- jessesuen
- jgwest
- keithchong
- mayzhang2000
- rbreeze
- leoluz
- crenshaw-dev
- pasha-codefresh
reviewers:
- dthomson25
- tetchel
- terrytangyuan
- wtam2018
- ishitasequeira
- reginapizza
- hblixt
- chetan-rns
- wanghong230
- ciiay
- saumeya
- rachelwang20


@@ -1,12 +1,8 @@
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:7.0.0-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --staticassets ui/dist/app"
dex: sh -c "go run github.com/argoproj/argo-cd/cmd/argocd-util gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/dexidp/dex:v2.22.0 serve /dex.yaml"
redis: docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:5.0.8-alpine --save "" --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} go run ./cmd/argocd-repo-server/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
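# A minimal sketch of driving this Procfile directly with goreman, which the
# Makefile start targets normally wrap; service names are taken from the entries above:
#   goreman -f Procfile start
#   goreman run restart repo-server   # assumes goreman's "run" subcommand is available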


@@ -1,4 +1,7 @@
[![Integration tests](https://github.com/argoproj/argo-cd/workflows/Integration%20tests/badge.svg?branch=master)](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd) [![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4486/badge)](https://bestpractices.coreinfrastructure.org/projects/4486) [![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)
[![Integration tests](https://github.com/argoproj/argo-cd/workflows/Integration%20tests/badge.svg?branch=master)](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd)
[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest)
# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -8,8 +11,6 @@ Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
![Argo CD UI](docs/assets/argocd-ui.gif)
[![Argo CD Demo](https://img.youtube.com/vi/0WAm0y2vLIo/0.jpg)](https://youtu.be/0WAm0y2vLIo)
## Why Argo CD?
1. Application definitions, configurations, and environments should be declarative and version controlled.
@@ -21,54 +22,19 @@ Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
## Documentation
To learn more about Argo CD [go to the complete documentation](https://argo-cd.readthedocs.io/).
To learn more about Argo CD [go to the complete documentation](https://argoproj.github.io/argo-cd/).
Check live demo at https://cd.apps.argoproj.io/.
## Community
## Community Blogs and Presentations
### Contribution, Discussion and Support
You can reach the Argo CD community and developers via the following channels:
* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)
* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
### Blogs and Presentations
1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://blog.akuity.io/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argo-cd-7c5b4057ee49)
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)
1. [Couchbase - How To Run a Database Cluster in Kubernetes Using Argo CD](https://youtu.be/nkPoPaVzExY)
1. [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)
1. [Environments Based On Pull Requests (PRs): Using Argo CD To Apply GitOps Principles On Previews](https://youtu.be/cpAaI8p4R60)
1. [Argo CD: Applying GitOps Principles To Manage Production Environment In Kubernetes](https://youtu.be/vpWQeoaiRM4)
1. [Creating Temporary Preview Environments Based On Pull Requests With Argo CD And Codefresh](https://codefresh.io/continuous-deployment/creating-temporary-preview-environments-based-pull-requests-argo-cd-codefresh/)
1. [Tutorial: Everything You Need To Become A GitOps Ninja](https://www.youtube.com/watch?v=r50tRQjisxw) 90m tutorial on GitOps and Argo CD.
1. [Comparison of Argo CD, Spinnaker, Jenkins X, and Tekton](https://www.inovex.de/blog/spinnaker-vs-argo-cd-vs-tekton-vs-jenkins-x/)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://www.ibm.com/cloud/blog/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://medium.com/ibm-cloud/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2-4395af317359)
1. [GitOps for Kubeflow using Argo CD](https://v0-6.kubeflow.org/docs/use-cases/gitops-for-kubeflow/)
1. [GitOps Toolsets on Kubernetes with CircleCI and Argo CD](https://www.digitalocean.com/community/tutorials/webinar-series-gitops-tool-sets-on-kubernetes-with-circleci-and-argo-cd)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager](https://www.ibm.com/blogs/bluemix/2019/02/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2/)
1. [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be)
1. [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU). Among other things, describes how Kubeflow uses Argo CD to implement GitOps for ML
1. [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s)
1. [Introduction to Argo CD : Kubernetes DevOps CI/CD](https://www.youtube.com/watch?v=2WSJF7d8dUg&feature=youtu.be)
1. [GitOps Deployment and Kubernetes - using Argo CD](https://medium.com/riskified-technology/gitops-deployment-and-kubernetes-f1ab289efa4b)
1. [Deploy Argo CD with Ingress and TLS in Three Steps: No YAML Yak Shaving Required](https://itnext.io/deploy-argo-cd-with-ingress-and-tls-in-three-steps-no-yaml-yak-shaving-required-bc536d401491)
1. [GitOps Continuous Delivery with Argo and Codefresh](https://codefresh.io/events/cncf-member-webinar-gitops-continuous-delivery-argo-codefresh/)
1. [Stay up to date with Argo CD and Renovate](https://mjpitz.com/blog/2020/12/03/renovate-your-gitops/)
1. [Setting up Argo CD with Helm](https://www.arthurkoziel.com/setting-up-argocd-with-helm/)
1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/)
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
1. [GitOps Deployment and Kubernetes - using ArgoCD](https://medium.com/riskified-technology/gitops-deployment-and-kubernetes-f1ab289efa4b)


@@ -1,76 +0,0 @@
# Security Policy for Argo CD
Version: **v1.4 (2022-01-23)**
## Preface
As a deployment tool, Argo CD needs to have production access which makes
security a very important topic. The Argoproj team takes security very
seriously and is continuously working on improving it.
## A word about security scanners
Many organisations these days employ security scanners to validate their
container images before letting them on their clusters, and that is a good
thing. However, the quality and results of these scanners vary greatly; many
of them produce false positives and require people to look at the reported
issues and validate them for correctness. A great example is that some
scanners report kernel vulnerabilities for container images just because the
images are derived from some distribution.
We kindly ask you to not raise issues or contact us regarding any issues
that are found by your security scanner. Many of those produce a lot of false
positives, and many of these issues don't affect Argo CD. We do have scanners
in place for our code, dependencies and container images that we publish. We
are well aware of the issues that may affect Argo CD and are constantly
working on the remediation of those that affect Argo CD and our users.
If you believe that we might have missed an issue that we should take a look
at (that can happen), then please discuss it with us. If there is a CVE
assigned to the issue, please do open an issue on our GitHub tracker instead
of writing to the security contact e-mail, since things reported by scanners
are public already and the discussion that might emerge is of benefit to the
general community. However, please at least roughly validate your scanner
results and their impact on Argo CD before opening an issue.
## Supported Versions
We currently support the most recent release (`N`, e.g. `1.8`) and the release
previous to the most recent one (`N-1`, e.g. `1.7`). With the release of
`N+1`, `N-1` drops out of support and `N` becomes `N-1`.
We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
supported versions, which will contain fixes for security vulnerabilities and
important bugs. Prior releases might receive critical security fixes on a
best-effort basis; however, it cannot be guaranteed that security fixes get
back-ported to these unsupported versions.
In rare cases, where a security fix needs a complex re-design of a feature or is
otherwise very intrusive, and there's a workaround available, we may decide to
provide a forward-fix only, e.g. one released with the next minor release, instead
of releasing it within a patch branch for the currently supported releases.
## Reporting a Vulnerability
If you find a security-related bug in Argo CD, we kindly ask you for responsible
disclosure and for giving us appropriate time to react, analyze and develop a
fix to mitigate the reported security vulnerability.
We will do our best to react quickly to your inquiry, and to coordinate a fix
and disclosure with you. Sometimes, it might take a little longer for us to
react (e.g. due to out-of-office conditions), so please bear with us in these cases.
We will publish security advisories using the
[GitHub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well informed, and will credit you for your
findings (unless you prefer to stay anonymous, of course).
Please report vulnerabilities by e-mail to the following address:
* cncf-argo-security@lists.cncf.io
## Securing your Argo CD Instance
See the [operator manual security page](docs/operator-manual/security.md) for
additional information about Argo CD's security features and how to make your
Argo CD production ready.


@@ -1,7 +1,7 @@
# Defined below are the security contacts for this repo.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities
# INSTRUCTIONS AT https://argoproj.github.io/argo-cd/security_considerations/#reporting-vulnerabilities
alexmt
edlee2121

USERS.md

@@ -5,204 +5,73 @@ As the Argo Community grows, we'd like to keep track of our users. Please send a
Currently, the following organizations are **officially** using Argo CD:
1. [127Labs](https://127labs.com/)
1. [3Rein](https://www.3rein.com/)
1. [7shifts](https://www.7shifts.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Adventure](https://jp.adventurekk.com/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Allianz Direct](https://www.allianzdirect.de/)
1. [Ambassador Labs](https://www.getambassador.io/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
1. [Axual B.V.](https://axual.com)
1. [Baloise](https://www.baloise.com)
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
1. [Beez Innovation Labs](https://www.beezlabs.com/)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [BigPanda](https://bigpanda.io)
1. [BioBox Analytics](https://biobox.io)
1. [BMW Group](https://www.bmwgroup.com/)
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Camptocamp](https://camptocamp.com)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [Chargetrip](https://chargetrip.com)
1. [Chime](https://www.chime.com)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Cobalt](https://www.cobalt.io/)
1. [Codefresh](https://www.codefresh.io/)
1. [Codility](https://www.codility.com/)
1. [Commonbond](https://commonbond.co/)
1. [CROZ d.o.o.](https://croz.net/)
1. [Crédit Agricole CIB](https://www.ca-cib.com)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cybozu](https://cybozu-global.com)
1. [D2iQ](https://www.d2iq.com)
1. [Datarisk](https://www.datarisk.io/)
1. [Deloitte](https://www.deloitte.com/)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
1. [Electronic Arts Inc. ](https://www.ea.com)
1. [Elium](https://www.elium.com)
1. [END.](https://www.endclothing.com/)
1. [Energisme](https://energisme.com/)
1. [Faro](https://www.faro.com/)
1. [Fave](https://myfave.com)
1. [Flip](https://flip.id)
1. [Fonoa](https://www.fonoa.com/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Future PLC](https://www.futureplc.com/)
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
1. [Garner](https://www.garnercorp.com)
1. [Generali Deutschland AG](https://www.generali.de/)
1. [Gitpod](https://www.gitpod.io)
1. [Gllue](https://gllue.com)
1. [Glovo](https://www.glovoapp.com)
1. [GMETRI](https://gmetri.com/)
1. [Gojek](https://www.gojek.io/)
1. [Greenpass](https://www.greenpass.com.br/)
1. [Handelsbanken](https://www.handelsbanken.se)
1. [Healy](https://www.healyworld.net)
1. [Helio](https://helio.exchange)
1. [hipages](https://hipages.com.au/)
1. [Hiya](https://hiya.com)
1. [Honestbank](https://honestbank.com)
1. [IBM](https://www.ibm.com/)
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
1. [imaware](https://imaware.health)
1. [Index Exchange](https://www.indexexchange.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
1. [Joblift](https://joblift.com/)
1. [JovianX](https://www.jovianx.com/)
1. [Kaltura](https://corp.kaltura.com/)
1. [KarrotPay](https://www.daangnpay.com/)
1. [Karrot](https://www.daangn.com/)
1. [Kasa](https://kasa.co.kr/)
1. [Keeeb](https://www.keeeb.com/)
1. [Keptn](https://keptn.sh)
1. [Kinguin](https://www.kinguin.net/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
1. [KubeSphere](https://github.com/kubesphere)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [Lightricks](https://www.lightricks.com/)
1. [LINE](https://linecorp.com/en/)
1. [Lytt](https://www.lytt.co/)
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
1. [Major League Baseball](https://mlb.com)
1. [Mambu](https://www.mambu.com/)
1. [MariaDB](https://mariadb.com)
1. [Mattermost](https://www.mattermost.com)
1. [Max Kelsen](https://www.maxkelsen.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
1. [mixi Group](https://mixi.co.jp/)
1. [Moengage](https://www.moengage.com/)
1. [Money Forward](https://corp.moneyforward.com/en/)
1. [MOO Print](https://www.moo.com/)
1. [MTN Group](https://www.mtn.com/)
1. [Natura &Co](https://naturaeco.com/)
1. [New Relic](https://newrelic.com/)
1. [Nextdoor](https://nextdoor.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Nitro](https://gonitro.com)
1. [Octadesk](https://octadesk.com)
1. [omegaUp](https://omegaUp.com)
1. [openEuler](https://openeuler.org)
1. [openGauss](https://opengauss.org/)
1. [openLooKeng](https://openlookeng.io)
1. [OpenSaaS Studio](https://opensaas.studio)
1. [Opensurvey](https://www.opensurvey.co.kr/)
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [p3r](https://www.p3r.one/)
1. [Packlink](https://www.packlink.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Polarpoint.io](https://polarpoint.io)
1. [Preferred Networks](https://preferred.jp/en/)
1. [Prudential](https://prudential.com.sg)
1. [PUBG](https://www.pubg.com)
1. [Qonto](https://qonto.com)
1. [QuintoAndar](https://quintoandar.com.br)
1. [Quipper](https://www.quipper.com/)
1. [Recreation.gov](https://www.recreation.gov/)
1. [Red Hat](https://www.redhat.com/)
1. [RightRev](https://rightrev.com/)
1. [Rise](https://www.risecard.eu/)
1. [Riskified](https://www.riskified.com/)
1. [Robotinfra](https://www.robotinfra.com)
1. [Rubin Observatory](https://www.lsst.org)
1. [Riskified](https://www.riskified.com/)
1. [Saildrone](https://www.saildrone.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smilee.io](https://smilee.io)
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)
1. [Speee](https://speee.jp/)
1. [Spendesk](https://spendesk.com/)
1. [Spores Labs](https://spores.app)
1. [Stuart](https://stuart.com/)
1. [Sumo Logic](https://sumologic.com/)
1. [Sutpc](http://www.sutpc.com/)
1. [Swiss Post](https://github.com/swisspost)
1. [Swisscom](https://www.swisscom.ch)
1. [Swissquote](https://github.com/swissquote)
1. [Syncier](https://syncier.com/)
1. [TableCheck](https://tablecheck.com/)
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [Technacy](https://www.technacy.it/)
1. [Tesla](https://tesla.com/)
1. [ThousandEyes](https://www.thousandeyes.com/)
1. [Ticketmaster](https://ticketmaster.com)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tigera](https://www.tigera.io/)
1. [Toss](https://toss.im/en)
1. [tru.ID](https://tru.id)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [UBIO](https://ub.io/)
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
1. [ungleich.ch](https://ungleich.ch/)
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Viaduct](https://www.viaduct.ai/)
1. [Virtuo](https://www.govirtuo.com/)
1. [VISITS Technologies](https://visits.world/en)
1. [Volvo Cars](https://www.volvocars.com/)
1. [VSHN - The DevOps Company](https://vshn.ch/)
1. [Walkbase](https://www.walkbase.com/)
1. [Webstores](https://www.webstores.nl)
1. [Wehkamp](https://www.wehkamp.nl/)
1. [WeMo Scooter](https://www.wemoscooter.com/)
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
1. [Witick](https://witick.io/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [WSpot](https://www.wspot.com.br/)
1. [Yieldlab](https://www.yieldlab.de/)
1. [Youverify](https://youverify.co/)
1. [Yubo](https://www.yubo.live/)
1. [Zimpler](https://www.zimpler.com/)
1. [ZOZO](https://corp.zozo.com/)
1. [Trendyol](https://www.trendyol.com/)
1. [RapidAPI](https://www.rapidapi.com/)
1. [MTN Group](https://www.mtn.com/)
1. [Moengage](https://www.moengage.com/)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [PayPay](https://paypay.ne.jp/)


@@ -1 +1 @@
2.4.23
1.7.0


@@ -1,732 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/db"
)
const (
// Rather than importing the whole argocd-notifications controller, just copying the const here
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/subscriptions.go#L12
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
ReconcileRequeueOnValidationError = time.Minute * 3
)
var (
preservedAnnotations = []string{
NotifiedAnnotationKey,
argov1alpha1.AnnotationKeyRefresh,
}
)
// ApplicationSetReconciler reconciles a ApplicationSet object
type ApplicationSetReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder record.EventRecorder
Generators map[string]generators.Generator
ArgoDB db.ArgoDB
ArgoAppClientset appclientset.Interface
KubeClientset kubernetes.Interface
utils.Policy
utils.Renderer
}
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch
func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = r.Log.WithValues("applicationset", req.NamespacedName)
_ = log.WithField("applicationset", req.NamespacedName)
var applicationSetInfo argoprojiov1alpha1.ApplicationSet
parametersGenerated := false
if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil {
if client.IgnoreNotFound(err) != nil {
log.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err)
}
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
return ctrl.Result{}, nil
}
// Log a warning if there are unrecognized generators
utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: string(applicationSetReason),
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
parametersGenerated = true
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo, req.Namespace)
if err != nil {
// While some generators may return an error that requires user intervention,
// other generators reference external resources that may change to cause
// the error to no longer occur. We thus log the error and requeue
// with a timeout to give this another shot at a later time.
//
// Changes to watched resources will cause this to be reconciled sooner than
// the RequeueAfter time.
log.Errorf("error occurred during application validation: %s", err.Error())
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
}
var validApps []argov1alpha1.Application
for i := range desiredApplications {
if validateErrors[i] == nil {
validApps = append(validApps, desiredApplications[i])
}
}
if len(validateErrors) > 0 {
var message string
for _, v := range validateErrors {
message = v.Error()
log.Errorf("validation error found during application validation: %s", message)
}
if len(validateErrors) > 1 {
// Only the last message gets added to the appset status, to keep the size reasonable.
message = fmt.Sprintf("%s (and %d more)", message, len(validateErrors)-1)
}
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: message,
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
}
if r.Policy.Update() {
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonUpdateApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
} else {
err = r.createInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonCreateApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
}
if r.Policy.Delete() {
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonDeleteApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
}
if applicationSetInfo.RefreshRequired() {
delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
err := r.Client.Update(ctx, &applicationSetInfo)
if err != nil {
log.Warnf("error occurred while updating ApplicationSet: %v", err)
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonRefreshApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
}
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
log.WithField("requeueAfter", requeueAfter).Info("end reconcile")
if len(validateErrors) == 0 {
if err := r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "All applications have been generated successfully",
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{
RequeueAfter: requeueAfter,
}, nil
}
func getParametersGeneratedCondition(parametersGenerated bool, message string) argoprojiov1alpha1.ApplicationSetCondition {
var paramtersGeneratedCondition argoprojiov1alpha1.ApplicationSetCondition
if parametersGenerated {
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
Message: "Successfully generated parameters for all Applications",
Reason: argoprojiov1alpha1.ApplicationSetReasonParametersGenerated,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}
} else {
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
Message: message,
Reason: argoprojiov1alpha1.ApplicationSetReasonErrorOccurred,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
}
}
return paramtersGeneratedCondition
}
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argoprojiov1alpha1.ApplicationSetCondition {
var resourceUpToDateCondition argoprojiov1alpha1.ApplicationSetCondition
if errorOccurred {
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: message,
Reason: reason,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
}
} else {
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "ApplicationSet up to date",
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}
}
return resourceUpToDateCondition
}
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argoprojiov1alpha1.ApplicationSet, condition argoprojiov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
// check if error occurred during reconcile process
errOccurred := condition.Type == argoprojiov1alpha1.ApplicationSetConditionErrorOccurred
var errOccurredCondition argoprojiov1alpha1.ApplicationSetCondition
if errOccurred {
errOccurredCondition = condition
} else {
errOccurredCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: "Successfully generated parameters for all Applications",
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
}
}
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
newConditions := []argoprojiov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
needToUpdateConditions := false
for _, condition := range newConditions {
// do nothing if appset already has same condition
for _, c := range applicationSet.Status.Conditions {
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
needToUpdateConditions = true
break
}
}
}
evaluatedTypes := map[argoprojiov1alpha1.ApplicationSetConditionType]bool{
argoprojiov1alpha1.ApplicationSetConditionErrorOccurred: true,
argoprojiov1alpha1.ApplicationSetConditionParametersGenerated: true,
argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
}
if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 {
// fetch updated Application Set object before updating it
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %v", err)
}
applicationSet.Status.SetConditions(
newConditions, evaluatedTypes,
)
// Update the newly fetched object with new set of conditions
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil && !apierr.IsNotFound(err) {
return fmt.Errorf("unable to set application set condition: %v", err)
}
}
return nil
}
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
// generated applications.
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argoprojiov1alpha1.ApplicationSet, namespace string) (map[int]error, error) {
errorsByIndex := map[int]error{}
namesSet := map[string]bool{}
for i, app := range desiredApplications {
if !namesSet[app.Name] {
namesSet[app.Name] = true
} else {
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
continue
}
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
continue
}
return nil, err
}
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, namespace); err != nil {
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
continue
}
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
if err != nil {
return nil, err
}
if len(conditions) > 0 {
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
continue
}
}
return errorsByIndex, nil
}
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argoprojiov1alpha1.ApplicationSet) time.Duration {
var res time.Duration
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
relevantGenerators := generators.GetRelevantGenerators(&requestedGenerator, r.Generators)
for _, g := range relevantGenerators {
t := g.GetRequeueAfter(&requestedGenerator)
if res == 0 {
res = t
} else if t != 0 && t < res {
res = t
}
}
}
return res
}
func getTempApplication(applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) *argov1alpha1.Application {
var tmplApplication argov1alpha1.Application
tmplApplication.Annotations = applicationSetTemplate.Annotations
tmplApplication.Labels = applicationSetTemplate.Labels
tmplApplication.Namespace = applicationSetTemplate.Namespace
tmplApplication.Name = applicationSetTemplate.Name
tmplApplication.Spec = applicationSetTemplate.Spec
tmplApplication.Finalizers = applicationSetTemplate.Finalizers
return &tmplApplication
}
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argoprojiov1alpha1.ApplicationSetReasonType, error) {
var res []argov1alpha1.Application
var firstError error
var applicationSetReason argoprojiov1alpha1.ApplicationSetReasonType
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo)
if err != nil {
log.WithError(err).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonApplicationParamsGenerationError
}
continue
}
for _, a := range t {
tmplApplication := getTempApplication(a.Template)
for _, p := range a.Params {
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p)
if err != nil {
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonRenderTemplateParamsError
}
continue
}
res = append(res, *app)
}
}
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
}
return res, applicationSetReason, firstError
}
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
// grab the job object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's an application set...
if owner.APIVersion != argoprojiov1alpha1.GroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&argoprojiov1alpha1.ApplicationSet{}).
Owns(&argov1alpha1.Application{}).
Watches(
&source.Kind{Type: &corev1.Secret{}},
&clusterSecretEventHandler{
Client: mgr.GetClient(),
Log: log.WithField("type", "createSecretEventHandler"),
}).
// TODO: also watch Applications and respond on changes if we own them.
Complete(r)
}
// createOrUpdateInCluster will create / update application resources in the cluster.
// - For new applications, it will call create
// - For existing application, it will call update
// The function also adds owner reference to all applications, and uses it to delete them.
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var firstError error
// Creates or updates the application in appList
for _, generatedApp := range desiredApplications {
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
generatedApp.Namespace = applicationSet.Namespace
found := &argov1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: generatedApp.Name,
Namespace: generatedApp.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
}
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
found.Spec = generatedApp.Spec
// Preserve specially treated argo cd annotations:
// * https://github.com/argoproj/applicationset/issues/180
// * https://github.com/argoproj/argo-cd/issues/10500
for _, key := range preservedAnnotations {
if state, exists := found.ObjectMeta.Annotations[key]; exists {
if generatedApp.Annotations == nil {
generatedApp.Annotations = map[string]string{}
}
generatedApp.Annotations[key] = state
}
}
found.ObjectMeta.Annotations = generatedApp.Annotations
found.ObjectMeta.Finalizers = generatedApp.Finalizers
found.ObjectMeta.Labels = generatedApp.Labels
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
if err != nil {
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
if firstError == nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
appLog.Logf(log.InfoLevel, "%s Application", action)
}
return firstError
}
// createInCluster will filter from the desiredApplications only the application that needs to be created
// Then it will call createOrUpdateInCluster to do the actual create
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var createApps []argov1alpha1.Application
current, err := r.getCurrentApplications(ctx, applicationSet)
if err != nil {
return err
}
m := make(map[string]bool) // Will hold the app names that are currently in the cluster
for _, app := range current {
m[app.Name] = true
}
// filter applications that are not in m[string]bool (new to the cluster)
for _, app := range desiredApplications {
_, exists := m[app.Name]
if !exists {
createApps = append(createApps, app)
}
}
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
}
func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
// TODO: Should this use the context param?
var current argov1alpha1.ApplicationList
err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name})
if err != nil {
return nil, err
}
return current.Items, nil
}
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators have been called and have generated applications
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
// clusterList, err := argoDB.ListClusters(ctx)
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, applicationSet.Namespace)
if err != nil {
return err
}
// Save current applications to be able to delete the ones that are not in appList
current, err := r.getCurrentApplications(ctx, applicationSet)
if err != nil {
return err
}
m := make(map[string]bool) // Will hold the app names in appList for the deletion process
for _, app := range desiredApplications {
m[app.Name] = true
}
// Delete apps that are not in m[string]bool
var firstError error
for _, app := range current {
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
_, exists := m[app.Name]
if !exists {
// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
if err != nil {
appLog.WithError(err).Error("failed to update Application")
if firstError == nil {
firstError = err
}
continue
}
err = r.Client.Delete(ctx, &app)
if err != nil {
appLog.WithError(err).Error("failed to delete Application")
if firstError == nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
appLog.Log(log.InfoLevel, "Deleted application")
}
}
return firstError
}
// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, app *argov1alpha1.Application, clusterList *argov1alpha1.ClusterList, appLog *log.Entry) error {
// Only check if the finalizers need to be removed IF there are finalizers to remove
if len(app.Finalizers) == 0 {
return nil
}
var validDestination bool
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, applicationSet.Namespace); err != nil {
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
validDestination = false
} else {
// Detect if the destination's server field does not match an existing cluster
matchingCluster := false
for _, cluster := range clusterList.Items {
// Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable.
if app.Spec.Destination.Server != cluster.Server {
continue
}
// The name must match, if it is not empty
if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name {
continue
}
matchingCluster = true
break
}
if !matchingCluster {
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
}
validDestination = matchingCluster
}
// If the destination is invalid (for example the cluster is no longer defined), then remove
// the application finalizers to avoid triggering Argo CD bug #5817
if !validDestination {
// Filter out the Argo CD finalizer from the finalizer list
var newFinalizers []string
for _, existingFinalizer := range app.Finalizers {
if existingFinalizer != argov1alpha1.ResourcesFinalizerName { // only remove this one
newFinalizers = append(newFinalizers, existingFinalizer)
}
}
// If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app
if len(newFinalizers) != len(app.Finalizers) {
app.Finalizers = newFinalizers
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name)
appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination")
err := r.Client.Update(ctx, app, &client.UpdateOptions{})
if err != nil {
return err
}
}
}
return nil
}
var _ handler.EventHandler = &clusterSecretEventHandler{}

File diff suppressed because it is too large.


@@ -1,84 +0,0 @@
package controllers
import (
"context"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
// clusterSecretEventHandler is used when watching Secrets to check if they are ArgoCD Cluster Secrets, and if so
// requeue any related ApplicationSets.
type clusterSecretEventHandler struct {
//handler.EnqueueRequestForOwner
Log log.FieldLogger
Client client.Client
}
func (h *clusterSecretEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.Object)
}
func (h *clusterSecretEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.ObjectNew)
}
func (h *clusterSecretEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.Object)
}
func (h *clusterSecretEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.Object)
}
// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allowing us to easily mock
// it for testing purposes.
type addRateLimitingInterface interface {
Add(item interface{})
}
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
return
}
h.Log.WithFields(log.Fields{
"namespace": object.GetNamespace(),
"name": object.GetName(),
}).Info("processing event for cluster secret")
appSetList := &argoprojiov1alpha1.ApplicationSetList{}
err := h.Client.List(context.Background(), appSetList)
if err != nil {
h.Log.WithError(err).Error("unable to list ApplicationSets")
return
}
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
for _, appSet := range appSetList.Items {
foundClusterGenerator := false
for _, generator := range appSet.Spec.Generators {
if generator.Clusters != nil {
foundClusterGenerator = true
break
}
}
if foundClusterGenerator {
// TODO: only queue the AppGenerator if the labels match this cluster
req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: appSet.Namespace, Name: appSet.Name}}
q.Add(req)
}
}
}


@@ -1,234 +0,0 @@
package controllers
import (
"testing"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
func TestClusterEventHandler(t *testing.T) {
scheme := runtime.NewScheme()
err := argoprojiov1alpha1.AddToScheme(scheme)
assert.Nil(t, err)
err = argov1alpha1.AddToScheme(scheme)
assert.Nil(t, err)
tests := []struct {
name string
items []argoprojiov1alpha1.ApplicationSet
secret corev1.Secret
expectedRequests []ctrl.Request
}{
{
name: "no application sets should mean no requests",
items: []argoprojiov1alpha1.ApplicationSet{},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{},
},
{
name: "a cluster generator should produce a request",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{{
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
}},
},
{
name: "multiple cluster generators should produce multiple requests",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set2",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set2"}},
},
},
{
name: "non-cluster generator should not match",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "another-namespace",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
{
ObjectMeta: v1.ObjectMeta{
Name: "app-set-non-cluster",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
List: &argoprojiov1alpha1.ListGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
},
},
{
name: "non-argo cd secret should not match",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "another-namespace",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-non-argocd-secret",
},
},
expectedRequests: []reconcile.Request{},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
appSetList := argoprojiov1alpha1.ApplicationSetList{
Items: test.items,
}
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
handler := &clusterSecretEventHandler{
Client: fakeClient,
Log: log.WithField("type", "createSecretEventHandler"),
}
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
handler.queueRelatedAppGenerators(&mockAddRateLimitingInterface, &test.secret)
assert.False(t, mockAddRateLimitingInterface.errorOccurred)
assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests)
})
}
}
// Add checks the type, and adds it to the internal list of received additions
func (obj *mockAddRateLimitingInterface) Add(item interface{}) {
if req, ok := item.(ctrl.Request); ok {
obj.addedItems = append(obj.addedItems, req)
} else {
obj.errorOccurred = true
}
}
type mockAddRateLimitingInterface struct {
errorOccurred bool
addedItems []ctrl.Request
}

View File

@@ -1,180 +0,0 @@
package controllers
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
dynfake "k8s.io/client-go/dynamic/fake"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
func TestRequeueAfter(t *testing.T) {
mockServer := argoCDServiceMock{}
ctx := context.Background()
scheme := runtime.NewScheme()
err := argoappsetv1alpha1.AddToScheme(scheme)
assert.Nil(t, err)
gvrToListKind := map[schema.GroupVersionResource]string{{
Group: "mallard.io",
Version: "v1",
Resource: "ducks",
}: "DuckList"}
appClientset := kubefake.NewSimpleClientset()
k8sClient := fake.NewClientBuilder().Build()
duckType := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v2quack",
"kind": "Duck",
"metadata": map[string]interface{}{
"name": "mightyduck",
"namespace": "namespace",
"labels": map[string]interface{}{"duck": "all-species"},
},
"status": map[string]interface{}{
"decisions": []interface{}{
map[string]interface{}{
"clusterName": "staging-01",
},
map[string]interface{}{
"clusterName": "production-01",
},
},
},
},
}
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
terminalGenerators := map[string]generators.Generator{
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
"Git": generators.NewGitGenerator(mockServer),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build()),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
"PullRequest": generators.NewPullRequestGenerator(k8sClient),
}
nestedGenerators := map[string]generators.Generator{
"List": terminalGenerators["List"],
"Clusters": terminalGenerators["Clusters"],
"Git": terminalGenerators["Git"],
"SCMProvider": terminalGenerators["SCMProvider"],
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
"PullRequest": terminalGenerators["PullRequest"],
"Matrix": generators.NewMatrixGenerator(terminalGenerators),
"Merge": generators.NewMergeGenerator(terminalGenerators),
}
topLevelGenerators := map[string]generators.Generator{
"List": terminalGenerators["List"],
"Clusters": terminalGenerators["Clusters"],
"Git": terminalGenerators["Git"],
"SCMProvider": terminalGenerators["SCMProvider"],
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
"PullRequest": terminalGenerators["PullRequest"],
"Matrix": generators.NewMatrixGenerator(nestedGenerators),
"Merge": generators.NewMergeGenerator(nestedGenerators),
}
client := fake.NewClientBuilder().WithScheme(scheme).Build()
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(0),
Generators: topLevelGenerators,
}
type args struct {
appset *argoappsetv1alpha1.ApplicationSet
}
tests := []struct {
name string
args args
want time.Duration
wantErr assert.ErrorAssertionFunc
}{
{name: "Cluster", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{Clusters: &argoappsetv1alpha1.ClusterGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "ClusterMergeNested", args: args{&argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
{Merge: &argoappsetv1alpha1.MergeGenerator{
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
Git: &argoappsetv1alpha1.GitGenerator{},
},
},
}},
},
},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ClusterMatrixNested", args: args{&argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
{Matrix: &argoappsetv1alpha1.MatrixGenerator{
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
Git: &argoappsetv1alpha1.GitGenerator{},
},
},
}},
},
},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ListGenerator", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{List: &argoappsetv1alpha1.ListGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
})
}
}
type argoCDServiceMock struct {
mock *mock.Mock
}
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
args := a.mock.Called(ctx, repoURL, revision)
return args.Get(0).([]string), args.Error(1)
}
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
args := a.mock.Called(ctx, repoURL, revision, pattern)
return args.Get(0).(map[string][]byte), args.Error(1)
}
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
args := a.mock.Called(ctx, repoURL, revision, path)
return args.Get(0).([]byte), args.Error(1)
}
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
args := a.mock.Called(ctx, repoURL, revision)
return args.Get(0).([]string), args.Error(1)
}

View File

@@ -1,19 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- clusters: {}
template:
metadata:
name: '{{name}}-guestbook'
spec:
project: "default"
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
targetRevision: HEAD
path: guestbook
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,57 +0,0 @@
# How the Cluster Decision Resource generator works for clusterDecisionResource
1. The Cluster Decision Resource generator reads a configurable status format:
```yaml
status:
clusters:
- name: cluster-01
- name: cluster-02
```
This is a common status format. Another format that could be read looks like this:
```yaml
status:
decisions:
- clusterName: cluster-01
namespace: cluster-01
- clusterName: cluster-02
namespace: cluster-02
```
2. Any resource that has a list of key/value pairs, where the values match ArgoCD cluster names, can be used.
3. The key/value pairs found in each element of the list will be available to the template; `name` and `server` will still be available to the template as well.
4. The Service Account used by the ApplicationSet controller must have permission to `Get` the resource you want to retrieve the duck-type definition from.
5. A ConfigMap is used to identify the resource whose status lists the ArgoCD clusters to generate from. You can use multiple resources by creating a ConfigMap for each one in the ArgoCD namespace.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: my-configmap
data:
apiVersion: group.io/v1
kind: mykinds
statusListKey: clusters
matchKey: name
```
* `apiVersion` - This is the apiVersion of your resource
* `kind` - This is the plural kind of your resource
* `statusListKey` - Defaults to `clusters`; this is the key found in your resource's status that holds the list of ArgoCD clusters.
* `matchKey` - The key name found in each entry of the cluster list; `name` and `clusterName` are the keys in the examples above.
# Applying the example
1. Connect to a cluster with the ApplicationSet controller running
2. Edit the Role for the ApplicationSet service account, and grant it permission to `list` the `placementdecisions` resources, from apiGroups `cluster.open-cluster-management.io/v1alpha1`
```yaml
- apiGroups:
- "cluster.open-cluster-management.io/v1alpha1"
resources:
- placementdecisions
verbs:
- list
```
3. Apply the following controller and associated ManagedCluster CRDs:
https://github.com/open-cluster-management/placement
4. Now apply the PlacementDecision, the ConfigMap, and the ApplicationSet:
```bash
kubectl apply -f ./placementdecision.yaml
kubectl apply -f ./configMap.yaml
kubectl apply -f ./ducktype-example.yaml
```
5. For now this won't do anything until you create a controller that populates the `Status.Decisions` array.
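Step 5 above assumes something eventually populates `status.decisions`. Purely as an illustration (the resource and decision values below are made up, not produced by any real controller), building such a status on an unstructured object in Go could look like this:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A PlacementDecision-like object represented as unstructured data.
	pd := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "cluster.open-cluster-management.io/v1alpha1",
		"kind":       "PlacementDecision",
		"metadata": map[string]interface{}{
			"name":      "test-placement",
			"namespace": "argocd",
		},
	}}

	// The generator reads the list under statusListKey ("decisions" here) and
	// extracts the field named by matchKey ("clusterName") from each entry.
	decisions := []interface{}{
		map[string]interface{}{"clusterName": "cluster-01"},
		map[string]interface{}{"clusterName": "cluster-02"},
	}
	if err := unstructured.SetNestedSlice(pd.Object, decisions, "status", "decisions"); err != nil {
		panic(err)
	}

	fmt.Println(pd.Object["status"])
}
```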

View File

@@ -1,11 +0,0 @@
# Generating a Status.Decisions list from this CRD requires that https://github.com/open-cluster-management/multicloud-operators-placementrule be deployed
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ocm-placement
data:
apiVersion: apps.open-cluster-management.io/v1
kind: placementrules
statusListKey: decisions
matchKey: clusterName

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: book-import
spec:
generators:
- clusterDecisionResource:
configMapRef: ocm-placement
name: test-placement
requeueAfterSeconds: 30
template:
metadata:
name: '{{clusterName}}-book-import'
spec:
project: "default"
source:
repoURL: https://github.com/open-cluster-management/application-samples.git
targetRevision: HEAD
path: book-import
destination:
name: '{{clusterName}}'
namespace: bookimport
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,18 +0,0 @@
---
apiVersion: apps.open-cluster-management.io/v1
kind: PlacementRule
metadata:
name: test-placement
spec:
clusterReplicas: 1 # Availability choice, maximum number of clusters to provision at once
clusterSelector:
matchLabels:
'usage': 'development'
clusterConditions:
- type: ManagedClusterConditionAvailable
status: "True"
# Below is sample output the generator can consume.
status:
decisions:
- clusterName: cluster-01
- clusterName: cluster-02

View File

@@ -1,22 +0,0 @@
# This is an example of a typical ApplicationSet which uses the cluster generator.
# An ApplicationSet is composed of two stanzas:
#  - spec.generators - producer of a list of values supplied as arguments to the app template
# - spec.template - an application template, which has been parameterized
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- clusters: {}
template:
metadata:
name: '{{name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
chart: guestbook
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,33 +0,0 @@
# The cluster generator produces an items list from all clusters registered to Argo CD.
# It automatically provides the following fields as values to the app template:
# - name
# - server
# - metadata.labels.<key>
# - metadata.annotations.<key>
# - values.<key>
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- clusters:
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
values:
project: default
template:
metadata:
name: '{{name}}-guestbook'
labels:
environment: '{{metadata.labels.environment}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
chart: guestbook
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,44 +0,0 @@
# This example demonstrates the git directory generator, which produces an items list
# based on discovery of directories in a git repo matching a specified pattern.
# Git generators automatically provide {{path}} and {{path.basename}} as available
# variables to the app template.
#
# Suppose the following git directory structure (note the use of different config tools):
#
# cluster-deployments
# └── add-ons
# ├── argo-rollouts
# │   ├── all.yaml
# │   └── kustomization.yaml
# ├── argo-workflows
# │   └── install.yaml
# ├── grafana
# │   ├── Chart.yaml
# │   └── values.yaml
# └── prometheus-operator
# ├── Chart.yaml
# └── values.yaml
#
# The following ApplicationSet would produce four applications (in different namespaces),
# using the directory basename as both the namespace and application name.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-addons
spec:
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
directories:
- path: add-ons/*
template:
metadata:
name: '{{path.basename}}'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: http://kubernetes.default.svc
namespace: '{{path.basename}}'

View File

@@ -1,55 +0,0 @@
# This example demonstrates a git file generator which traverses the directory structure of a git
# repository to discover items based on a filename convention. For each file discovered, the
# contents of the file itself act as the set of inputs to the app template.
#
# Suppose the following git directory structure:
#
# cluster-deployments
# ├── apps
# │ └── guestbook
# │ └── install.yaml
# └── cluster-config
# ├── engineering
# │ ├── dev
# │ │ └── config.json
# │ └── prod
# │ └── config.json
# └── finance
# ├── dev
# │ └── config.json
# └── prod
# └── config.json
#
# The discovered files (e.g. config.json) can contain any structured data to be supplied to the
# generated application, e.g.:
# {
# "aws_account": "123456",
# "asset_id": "11223344"
# "cluster": {
# "owner": "Jesse_Suen@intuit.com",
# "name": "engineering-dev",
# "address": "http://1.2.3.4"
# }
# }
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
files:
- path: "**/config.json"
template:
metadata:
name: '{{cluster.name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: apps/guestbook
destination:
server: '{{cluster.address}}'
namespace: guestbook

View File

@@ -1,68 +0,0 @@
# This example demonstrates a git file generator which produces its items based on one or
# more files referenced in a git repo. The referenced files would contain a json/yaml list of
# arbitrary structured objects. Each item of the list would become a set of parameters to a
# generated application.
#
# Suppose the following git directory structure:
#
# cluster-deployments
# ├── apps
# │ └── guestbook
# │ ├── v1.0
# │ │ └── install.yaml
# │ └── v2.0
# │ └── install.yaml
# └── config
# └── clusters.json
#
# In this example, the `clusters.json` file is a JSON list of structured data:
# [
# {
# "account": "123456",
# "asset_id": "11223344",
# "cluster": {
# "owner": "Jesse_Suen@intuit.com",
# "name": "engineering-dev",
# "address": "http://1.2.3.4"
# },
# "appVersions": {
# "prometheus-operator": "v0.38",
# "guestbook": "v2.0"
# }
# },
# {
# "account": "456789",
# "asset_id": "55667788",
# "cluster": {
# "owner": "Alexander_Matyushentsev@intuit.com",
# "name": "engineering-prod",
# "address": "http://2.4.6.8"
# },
# "appVersions": {
# "prometheus-operator": "v0.38",
# "guestbook": "v1.0"
# }
# }
# ]
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
files:
- path: config/clusters.json
template:
metadata:
name: '{{cluster.name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: apps/guestbook/{{appVersions.guestbook}}
destination:
server: http://kubernetes.default.svc
namespace: guestbook

View File

@@ -1,33 +0,0 @@
# The list generator specifies a literal list of argument values to the app spec template.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
values:
project: dev
- cluster: engineering-prod
url: https://2.4.6.8
values:
project: prod
- cluster: finance-preprod
url: https://9.8.7.6
values:
project: preprod
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: guestbook/{{cluster}}
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,3 +0,0 @@
# Proposal Examples
This directory contains examples that are not yet implemented.
They are part of the project to indicate planned future work, and we welcome any contribution that adds an implementation.

View File

@@ -1,48 +0,0 @@
# For all generators, filters can be applied to reduce the generated items to a smaller subset.
# A powerful set of filter expressions is supported, using syntax provided by the
# https://github.com/antonmedv/expr library. Example expressions are demonstrated below.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
# Match all clusters that meet ALL of the following conditions:
# 1. name matches the regex `sales-.*`
# 2. environment label is either 'staging' or 'prod'
- clusters:
filters:
- expr: '{{name}} matches "sales-.*"'
- expr: '{{metadata.labels.environment}} in [staging, prod]'
values:
version: '2.0.0'
# Filter items from `config/clusters.json` in the `cluster-deployments` git repo,
# to only those having the `cluster.enabled == true` property. e.g.:
# {
# ...
# "cluster": {
# "enabled": true,
# ...
# }
# }
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
files:
- path: config/clusters.json
filters:
- expr: '{{cluster.enabled}} == true'
template:
metadata:
name: '{{name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: "{{values.version}}"
chart: guestbook
helm:
parameters:
- name: foo
value: "{{metadata.annotations.foo}}"
destination:
server: '{{server}}'
namespace: guestbook
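The filter syntax in this proposal relies on the expr library linked in the comments above. As a rough sketch of the underlying idea (not the proposed implementation), evaluating one such expression against a single generated parameter set might look like this:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

func main() {
	// One parameter set, as a git file generator might produce it from clusters.json.
	params := map[string]interface{}{
		"cluster": map[string]interface{}{
			"enabled": true,
			"name":    "engineering-dev",
		},
	}

	// The filter expression from the proposal above.
	out, err := expr.Eval("cluster.enabled == true", params)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true -> keep this item
}
```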

View File

@@ -1,48 +0,0 @@
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
# useful to do this in order to override the spec.template stanza, and when simple string
# parameterization is insufficient. In the below examples, the generators[].XXX.template is
# a partial definition, which overrides/patches the default template.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
template:
metadata: {}
spec:
project: "project"
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
path: '{{cluster}}-override'
destination: {}
- list:
elements:
- cluster: engineering-prod
url: https://1.2.3.4
template:
metadata: {}
spec:
project: "project2"
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
path: '{{cluster}}-override2'
destination: {}
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: "project"
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: guestbook/{{cluster}}
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,6 +0,0 @@
#namePrefix: kustomize-
resources:
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
version: 40.5.0
repository: https://prometheus-community.github.io/helm-charts

View File

@@ -1,6 +0,0 @@
#namePrefix: kustomize-
resources:
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,23 +0,0 @@
apiVersion: v2
name: helm-guestbook
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "helm-guestbook.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get svc -w {{ template "helm-guestbook.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "helm-guestbook.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "helm-guestbook.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}

View File

@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "helm-guestbook.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "helm-guestbook.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "helm-guestbook.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@@ -1,52 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "helm-guestbook.fullname" . }}
labels:
app: {{ template "helm-guestbook.name" . }}
chart: {{ template "helm-guestbook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
revisionHistoryLimit: 3
selector:
matchLabels:
app: {{ template "helm-guestbook.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "helm-guestbook.name" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "helm-guestbook.fullname" . }}
labels:
app: {{ template "helm-guestbook.name" . }}
chart: {{ template "helm-guestbook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ template "helm-guestbook.name" . }}
release: {{ .Release.Name }}

View File

@@ -1,45 +0,0 @@
# Default values for helm-guestbook.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: gcr.io/heptio-images/ks-guestbook-demo
tag: 0.1
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
version: 40.5.0
repository: https://prometheus-community.github.io/helm-charts

View File

@@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-addons
namespace: argocd
spec:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/git-generator-directory/excludes/cluster-addons/*
- exclude: true
path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook
template:
metadata:
name: '{{path.basename}}'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: https://kubernetes.default.svc
namespace: '{{path.basename}}'
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-addons
namespace: argocd
spec:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/git-generator-directory/cluster-addons/*
template:
metadata:
name: '{{path.basename}}'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: https://kubernetes.default.svc
namespace: '{{path.basename}}'
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,7 +0,0 @@
namePrefix: kustomize-
resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,9 +0,0 @@
{
"aws_account": "123456",
"asset_id": "11223344",
"cluster": {
"owner": "cluster-admin@company.com",
"name": "engineering-dev",
"address": "http://1.2.3.4"
}
}

View File

@@ -1,9 +0,0 @@
{
"aws_account": "123456",
"asset_id": "11223344",
"cluster": {
"owner": "cluster-admin@company.com",
"name": "engineering-prod",
"address": "http://1.2.3.4"
}
}

View File

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
files:
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
template:
metadata:
name: '{{cluster.name}}-guestbook'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: "applicationset/examples/git-generator-files-discovery/apps/guestbook"
destination:
server: https://kubernetes.default.svc
#server: '{{cluster.address}}'
namespace: guestbook

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
- cluster: engineering-prod
url: https://kubernetes.default.svc
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: applicationset/examples/list-generator/guestbook/{{cluster}}
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,6 +0,0 @@
#namePrefix: kustomize-
resources:
- namespace-install.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,417 +0,0 @@
# This is an auto-generated file. DO NOT EDIT
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterworkflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: ClusterWorkflowTemplate
listKind: ClusterWorkflowTemplateList
plural: clusterworkflowtemplates
shortNames:
- clusterwftmpl
- cwft
singular: clusterworkflowtemplate
scope: Cluster
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cronworkflows.argoproj.io
spec:
group: argoproj.io
names:
kind: CronWorkflow
listKind: CronWorkflowList
plural: cronworkflows
shortNames:
- cwf
- cronwf
singular: cronworkflow
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workfloweventbindings.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowEventBinding
listKind: WorkflowEventBindingList
plural: workfloweventbindings
shortNames:
- wfeb
singular: workfloweventbinding
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflows.argoproj.io
spec:
additionalPrinterColumns:
- JSONPath: .status.phase
description: Status of the workflow
name: Status
type: string
- JSONPath: .status.startedAt
description: When the workflow was started
format: date-time
name: Age
type: date
group: argoproj.io
names:
kind: Workflow
listKind: WorkflowList
plural: workflows
shortNames:
- wf
singular: workflow
scope: Namespaced
subresources: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowTemplate
listKind: WorkflowTemplateList
plural: workflowtemplates
shortNames:
- wftmpl
singular: workflowtemplate
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-role
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- get
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- create
- apiGroups:
- argoproj.io
resources:
- workflowtemplates
- workflowtemplates/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- argoproj.io
resources:
- cronworkflows
- cronworkflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-server-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- watch
- create
- patch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- apiGroups:
- argoproj.io
resources:
- workflows
- workfloweventbindings
- workflowtemplates
- cronworkflows
- cronworkflows/finalizers
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-role
subjects:
- kind: ServiceAccount
name: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-server-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-server-role
subjects:
- kind: ServiceAccount
name: argo-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
---
apiVersion: v1
kind: Service
metadata:
name: argo-server
spec:
ports:
- name: web
port: 2746
targetPort: 2746
selector:
app: argo-server
---
apiVersion: v1
kind: Service
metadata:
name: workflow-controller-metrics
spec:
ports:
- name: metrics
port: 9090
protocol: TCP
targetPort: 9090
selector:
app: workflow-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: argo-server
spec:
selector:
matchLabels:
app: argo-server
template:
metadata:
labels:
app: argo-server
spec:
containers:
- args:
- server
- --namespaced
image: argoproj/argocli:v2.12.5
name: argo-server
ports:
- containerPort: 2746
name: web
readinessProbe:
httpGet:
path: /
port: 2746
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 20
volumeMounts:
- mountPath: /tmp
name: tmp
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
serviceAccountName: argo-server
volumes:
- emptyDir: {}
name: tmp
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: workflow-controller
spec:
selector:
matchLabels:
app: workflow-controller
template:
metadata:
labels:
app: workflow-controller
spec:
containers:
- args:
- --configmap
- workflow-controller-configmap
- --executor-image
- argoproj/argoexec:v2.12.5
- --namespaced
command:
- workflow-controller
image: argoproj/workflow-controller:v2.12.5
livenessProbe:
httpGet:
path: /metrics
port: metrics
initialDelaySeconds: 30
periodSeconds: 30
name: workflow-controller
ports:
- containerPort: 9090
name: metrics
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
serviceAccountName: argo

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
version: 9.4.10
repository: https://prometheus-community.github.io/helm-charts

View File

@@ -1,33 +0,0 @@
# This example demonstrates combining the git generator with a cluster generator.
# The expected output is one application per combination of git directory and cluster (application_count = git directories * clusters)
#
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-git
spec:
generators:
- matrix:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/matrix/cluster-addons/*
- clusters:
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
template:
metadata:
name: '{{path.basename}}-{{name}}'
spec:
project: '{{metadata.labels.environment}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{server}}'
namespace: '{{path.basename}}'

View File

@@ -1,39 +0,0 @@
# This example demonstrates combining the git generator with a list generator.
# The expected output is one application per combination of git directory and list entry (application_count = git directories * list entries)
#
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: list-git
spec:
generators:
- matrix:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/matrix/cluster-addons/*
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
values:
project: dev
- cluster: engineering-prod
url: https://2.4.6.8
values:
project: prod
template:
metadata:
name: '{{path.basename}}-{{cluster}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{url}}'
namespace: '{{path.basename}}'

View File

@@ -1,37 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: list-and-list
namespace: argocd
spec:
generators:
- matrix:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
values:
project: default
- cluster: engineering-prod
url: https://kubernetes.default.svc
values:
project: default
- list:
elements:
- values:
suffix: '1'
- values:
suffix: '2'
template:
metadata:
name: '{{cluster}}-{{values.suffix}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{url}}'
namespace: '{{path.basename}}'

View File

@@ -1,67 +0,0 @@
# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union
# generators cannot contain further-nested matrix or union generators.
#
# The generators are evaluated from most-nested to least-nested. In this case:
# 1. The union generator joins two lists to make 3 parameter sets.
# 2. The inner matrix generator takes the cartesian product of the two lists to make 4 parameter sets.
# 3. The outer matrix generator takes the cartesian product of the 3 union and the 4 inner matrix parameter sets to
# make 3*4=12 final parameter sets.
# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: matrix-and-union-in-matrix
spec:
generators:
- matrix:
generators:
- union:
mergeKeys:
- cluster
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
values:
project: default
- cluster: engineering-prod
url: https://kubernetes.default.svc
values:
project: default
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
values:
project: default
- cluster: engineering-test
url: https://kubernetes.default.svc
values:
project: default
- matrix:
generators:
- list:
elements:
- values:
suffix: '1'
- values:
suffix: '2'
- list:
elements:
- values:
prefix: 'first'
- values:
prefix: 'second'
template:
metadata:
name: '{{values.prefix}}-{{cluster}}-{{values.suffix}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{url}}'
namespace: '{{path.basename}}'
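The 3 * 4 = 12 expansion described in the comments above is just a cartesian product of parameter sets. A small, self-contained Go sketch of that combination step (simplified; the real generators also handle templates, merge keys, and error cases):

```go
package main

import "fmt"

// cartesian combines every parameter set from a with every set from b,
// which is essentially what the matrix generator does with its two children.
func cartesian(a, b []map[string]string) []map[string]string {
	out := make([]map[string]string, 0, len(a)*len(b))
	for _, p := range a {
		for _, q := range b {
			merged := map[string]string{}
			for k, v := range p {
				merged[k] = v
			}
			for k, v := range q {
				merged[k] = v
			}
			out = append(out, merged)
		}
	}
	return out
}

func main() {
	union := []map[string]string{ // 3 sets from the union generator
		{"cluster": "engineering-dev"},
		{"cluster": "engineering-prod"},
		{"cluster": "engineering-test"},
	}
	inner := cartesian( // 2 x 2 = 4 sets from the inner matrix
		[]map[string]string{{"values.suffix": "1"}, {"values.suffix": "2"}},
		[]map[string]string{{"values.prefix": "first"}, {"values.prefix": "second"}},
	)
	final := cartesian(union, inner)
	fmt.Println(len(final)) // 12
}
```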

View File

@@ -1,44 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: merge-clusters-and-list
spec:
generators:
- merge:
mergeKeys:
- server
generators:
- clusters:
values:
kafka: 'true'
redis: 'false'
# For clusters labeled use-kafka: 'false', disable Kafka.
- clusters:
selector:
matchLabels:
use-kafka: 'false'
values:
kafka: 'false'
# For a specific cluster, enable Redis.
- list:
elements:
- server: https://some-specific-cluster
values.redis: 'true'
template:
metadata:
name: '{{name}}'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
targetRevision: HEAD
path: helm-guestbook
helm:
parameters:
- name: kafka
value: '{{values.kafka}}'
- name: redis
value: '{{values.redis}}'
destination:
server: '{{server}}'
namespace: default

View File

@@ -1,43 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: merge-two-matrixes
spec:
generators:
- merge:
mergeKeys:
- server
- environment
generators:
- matrix:
generators:
- clusters:
values:
replicaCount: '2'
- list:
elements:
- environment: staging
namespace: guestbook-non-prod
- environment: prod
namespace: guestbook
- list:
elements:
- server: https://kubernetes.default.svc
environment: staging
values.replicaCount: '1'
template:
metadata:
name: '{{name}}-guestbook-{{environment}}'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
targetRevision: HEAD
path: helm-guestbook
helm:
parameters:
- name: replicaCount
value: '{{values.replicaCount}}'
destination:
server: '{{server}}'
namespace: '{{namespace}}'

View File

@@ -1,40 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: myapp
spec:
generators:
- pullRequest:
github:
# The GitHub organization or user.
owner: myorg
# The GitHub repository.
repo: myrepo
# For GitHub Enterprise. (optional)
api: https://git.example.com/
# Reference to a Secret containing an access token. (optional)
tokenRef:
secretName: github-token
key: token
# Labels are used to filter the PRs that you want to target. (optional)
labels:
- preview
template:
metadata:
name: 'myapp-{{ branch }}-{{ number }}'
spec:
source:
repoURL: 'https://github.com/myorg/myrepo.git'
targetRevision: '{{ head_sha }}'
path: helm-guestbook
helm:
parameters:
- name: "image.tag"
value: "pull-{{ head_sha }}"
project: default
destination:
server: https://kubernetes.default.svc
namespace: "{{ branch }}-{{ number }}"
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- scmProvider:
github:
organization: argoproj
cloneProtocol: https
filters:
- repositoryMatch: example-apps
template:
metadata:
name: '{{ repository }}-guestbook'
spec:
project: "default"
source:
repoURL: '{{ url }}'
targetRevision: '{{ branch }}'
path: guestbook
destination:
server: https://kubernetes.default.svc
namespace: guestbook

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,7 +0,0 @@
namePrefix: kustomize-
resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,7 +0,0 @@
namePrefix: kustomize-
resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,36 +0,0 @@
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
# useful to do this in order to override the spec.template stanza, and when simple string
# parameterization is insufficient. In the below examples, the generators[].XXX.template is
# a partial definition, which overrides/patches the default template.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
template:
metadata: {}
spec:
project: "default"
source:
targetRevision: HEAD
repoURL: https://github.com/argoproj/argo-cd.git
path: 'applicationset/examples/template-override/{{cluster}}-override'
destination: {}
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: "default"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: applicationset/examples/template-override/default
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,186 +0,0 @@
package generators
import (
"context"
"fmt"
"regexp"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v2/util/settings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
const (
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
ArgoCDSecretTypeCluster = "cluster"
)
var _ Generator = (*ClusterGenerator)(nil)
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
type ClusterGenerator struct {
client.Client
ctx context.Context
clientset kubernetes.Interface
// namespace is the Argo CD namespace
namespace string
settingsManager *settings.SettingsManager
}
func NewClusterGenerator(c client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator {
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
g := &ClusterGenerator{
Client: c,
ctx: ctx,
clientset: clientset,
namespace: namespace,
settingsManager: settingsManager,
}
return g
}
// GetRequeueAfter never requeues the cluster generator because the `clusterSecretEventHandler` will requeue the appsets
// when the cluster secrets change
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
return NoRequeueAfter
}
func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) *argoappsetv1alpha1.ApplicationSetTemplate {
return &appSetGenerator.Clusters.Template
}
func (g *ClusterGenerator) GenerateParams(
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, _ *argoappsetv1alpha1.ApplicationSet) ([]map[string]string, error) {
if appSetGenerator == nil {
return nil, EmptyAppSetGeneratorError
}
if appSetGenerator.Clusters == nil {
return nil, EmptyAppSetGeneratorError
}
// Do not include the local cluster in the cluster parameters IF there is a non-empty selector
// - Since local clusters do not have secrets, they do not have labels to match against
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
// ListClusters from Argo CD's util/db package will include the local cluster in the list of clusters
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
if err != nil {
return nil, err
}
if clustersFromArgoCD == nil {
return nil, nil
}
clusterSecrets, err := g.getSecretsByClusterName(appSetGenerator)
if err != nil {
return nil, err
}
res := []map[string]string{}
secretsFound := []corev1.Secret{}
for _, cluster := range clustersFromArgoCD.Items {
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
// handled by the next step.
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
secretsFound = append(secretsFound, secretForCluster)
} else if !ignoreLocalClusters {
// If there is no secret for the cluster, it's the local cluster, so handle it here.
params := map[string]string{}
params["name"] = cluster.Name
params["server"] = cluster.Server
for key, value := range appSetGenerator.Clusters.Values {
params[fmt.Sprintf("values.%s", key)] = value
}
log.WithField("cluster", "local cluster").Info("matched local cluster")
res = append(res, params)
}
}
// For each matching cluster secret (non-local clusters only)
for _, cluster := range secretsFound {
params := map[string]string{}
params["name"] = string(cluster.Data["name"])
params["nameNormalized"] = sanitizeName(string(cluster.Data["name"]))
params["server"] = string(cluster.Data["server"])
for key, value := range cluster.ObjectMeta.Annotations {
params[fmt.Sprintf("metadata.annotations.%s", key)] = value
}
for key, value := range cluster.ObjectMeta.Labels {
params[fmt.Sprintf("metadata.labels.%s", key)] = value
}
for key, value := range appSetGenerator.Clusters.Values {
params[fmt.Sprintf("values.%s", key)] = value
}
log.WithField("cluster", cluster.Name).Info("matched cluster secret")
res = append(res, params)
}
return res, nil
}
func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
// List all Clusters:
clusterSecretList := &corev1.SecretList{}
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
return nil, err
}
if err := g.Client.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
return nil, err
}
log.Debug("clusters matching labels", "count", len(clusterSecretList.Items))
res := map[string]corev1.Secret{}
for _, cluster := range clusterSecretList.Items {
clusterName := string(cluster.Data["name"])
res[clusterName] = cluster
}
return res, nil
}
// sanitize the name in accordance with the below rules
// 1. contain no more than 253 characters
// 2. contain only lowercase alphanumeric characters, '-' or '.'
// 3. start and end with an alphanumeric character
func sanitizeName(name string) string {
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
maxDNSNameLength := 253
name = strings.ToLower(name)
name = invalidDNSNameChars.ReplaceAllString(name, "-")
if len(name) > maxDNSNameLength {
name = name[:maxDNSNameLength]
}
return strings.Trim(name, "-.")
}
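
The deleted function above is easiest to follow with concrete inputs. The following standalone sketch (not part of the diff) re-states the sanitization rules from the comment so the `nameNormalized` parameter can be checked in isolation; the sample names are borrowed from the test file that follows.

// Minimal sketch: re-states sanitizeName outside the generators package so
// its behaviour (and the nameNormalized parameter) can be tried in isolation.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func sanitizeName(name string) string {
	invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
	maxDNSNameLength := 253

	name = strings.ToLower(name)
	name = invalidDNSNameChars.ReplaceAllString(name, "-")
	if len(name) > maxDNSNameLength {
		name = name[:maxDNSNameLength]
	}
	return strings.Trim(name, "-.")
}

func main() {
	// "production_01/west" is the cluster name used in the tests below;
	// the generator exposes it as nameNormalized=production-01-west.
	fmt.Println(sanitizeName("production_01/west"))      // production-01-west
	fmt.Println(sanitizeName("-.--CLUSTER/name -./.-")) // cluster-name
}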


@@ -1,254 +0,0 @@
package generators
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"testing"
kubefake "k8s.io/client-go/kubernetes/fake"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
"github.com/stretchr/testify/assert"
)
type possiblyErroringFakeCtrlRuntimeClient struct {
client.Client
shouldError bool
}
func (p *possiblyErroringFakeCtrlRuntimeClient) List(ctx context.Context, secretList client.ObjectList, opts ...client.ListOption) error {
if p.shouldError {
return fmt.Errorf("could not list Secrets")
}
return p.Client.List(ctx, secretList, opts...)
}
func TestGenerateParams(t *testing.T) {
clusters := []client.Object{
&corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "staging-01",
Namespace: "namespace",
Labels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "staging",
"org": "foo",
},
Annotations: map[string]string{
"foo.argoproj.io": "staging",
},
},
Data: map[string][]byte{
"config": []byte("{}"),
"name": []byte("staging-01"),
"server": []byte("https://staging-01.example.com"),
},
Type: corev1.SecretType("Opaque"),
},
&corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "production-01",
Namespace: "namespace",
Labels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "production",
"org": "bar",
},
Annotations: map[string]string{
"foo.argoproj.io": "production",
},
},
Data: map[string][]byte{
"config": []byte("{}"),
"name": []byte("production_01/west"),
"server": []byte("https://production-01.example.com"),
},
Type: corev1.SecretType("Opaque"),
},
}
testCases := []struct {
name string
selector metav1.LabelSelector
values map[string]string
expected []map[string]string
// clientError is true if a k8s client error should be simulated
clientError bool
expectedError error
}{
{
name: "no label selector",
selector: metav1.LabelSelector{},
values: nil,
expected: []map[string]string{
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
{"name": "in-cluster", "server": "https://kubernetes.default.svc"},
},
clientError: false,
expectedError: nil,
},
{
name: "secret type label selector",
selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
},
},
values: nil,
expected: []map[string]string{
{"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
{"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
},
clientError: false,
expectedError: nil,
},
{
name: "production-only",
selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"environment": "production",
},
},
values: map[string]string{
"foo": "bar",
},
expected: []map[string]string{
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
},
clientError: false,
expectedError: nil,
},
{
name: "production or staging",
selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "environment",
Operator: "In",
Values: []string{
"production",
"staging",
},
},
},
},
values: map[string]string{
"foo": "bar",
},
expected: []map[string]string{
{"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
{"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production"},
},
clientError: false,
expectedError: nil,
},
{
name: "production or staging with match labels",
selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "environment",
Operator: "In",
Values: []string{
"production",
"staging",
},
},
},
MatchLabels: map[string]string{
"org": "foo",
},
},
values: map[string]string{
"name": "baz",
},
expected: []map[string]string{
{"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging"},
},
clientError: false,
expectedError: nil,
},
{
name: "simulate client error",
selector: metav1.LabelSelector{},
values: nil,
expected: nil,
clientError: true,
expectedError: fmt.Errorf("could not list Secrets"),
},
}
// convert []client.Object to []runtime.Object, for use by kubefake package
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
cl := &possiblyErroringFakeCtrlRuntimeClient{
fakeClient,
testCase.clientError,
}
var clusterGenerator = NewClusterGenerator(cl, context.Background(), appClientset, "namespace")
got, err := clusterGenerator.GenerateParams(&argoappsetv1alpha1.ApplicationSetGenerator{
Clusters: &argoappsetv1alpha1.ClusterGenerator{
Selector: testCase.selector,
Values: testCase.values,
},
}, nil)
if testCase.expectedError != nil {
assert.EqualError(t, err, testCase.expectedError.Error())
} else {
assert.NoError(t, err)
assert.ElementsMatch(t, testCase.expected, got)
}
})
}
}
func TestSanitizeClusterName(t *testing.T) {
t.Run("valid DNS-1123 subdomain name", func(t *testing.T) {
assert.Equal(t, "cluster-name", sanitizeName("cluster-name"))
})
t.Run("invalid DNS-1123 subdomain name", func(t *testing.T) {
invalidName := "-.--CLUSTER/name -./.-"
assert.Equal(t, "cluster-name", sanitizeName(invalidName))
})
}
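
The test above injects failures by wrapping controller-runtime's fake client and overriding only List. A minimal standalone restatement of that wrapper pattern (the type and error message here are illustrative, not part of the diff):

// Sketch of the error-injecting wrapper pattern: embed a client.Client and
// override List so any code path can be forced down its error branch.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

type erroringClient struct {
	client.Client
	shouldError bool
}

func (e *erroringClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	if e.shouldError {
		return fmt.Errorf("could not list Secrets")
	}
	return e.Client.List(ctx, list, opts...)
}

func main() {
	cl := &erroringClient{Client: fake.NewClientBuilder().Build(), shouldError: true}
	err := cl.List(context.Background(), &corev1.SecretList{})
	fmt.Println(err) // could not list Secrets
}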


@@ -1,229 +0,0 @@
package generators
import (
"context"
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v2/util/settings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
var _ Generator = (*DuckTypeGenerator)(nil)
// DuckTypeGenerator generates Applications for the clusters listed in the status of a duck-typed cluster decision resource, matched against the clusters registered with ArgoCD.
type DuckTypeGenerator struct {
ctx context.Context
dynClient dynamic.Interface
clientset kubernetes.Interface
namespace string // namespace is the Argo CD namespace
settingsManager *settings.SettingsManager
}
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
g := &DuckTypeGenerator{
ctx: ctx,
dynClient: dynClient,
clientset: clientset,
namespace: namespace,
settingsManager: settingsManager,
}
return g
}
func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
// Return a requeue default of 3 minutes, if no override is specified.
if appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds != nil {
return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second
}
return DefaultRequeueAfterSeconds
}
func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
return &appSetGenerator.ClusterDecisionResource.Template
}
func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, _ *argoprojiov1alpha1.ApplicationSet) ([]map[string]string, error) {
if appSetGenerator == nil {
return nil, EmptyAppSetGeneratorError
}
// Not likely to happen
if appSetGenerator.ClusterDecisionResource == nil {
return nil, EmptyAppSetGeneratorError
}
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
if err != nil {
return nil, err
}
if clustersFromArgoCD == nil {
return nil, nil
}
// Read the configMapRef
cm, err := g.clientset.CoreV1().ConfigMaps(g.namespace).Get(g.ctx, appSetGenerator.ClusterDecisionResource.ConfigMapRef, metav1.GetOptions{})
if err != nil {
return nil, err
}
// Extract GVK data for the dynamic client to use
versionIdx := strings.Index(cm.Data["apiVersion"], "/")
kind := cm.Data["kind"]
resourceName := appSetGenerator.ClusterDecisionResource.Name
labelSelector := appSetGenerator.ClusterDecisionResource.LabelSelector
log.WithField("kind.apiVersion", kind+"."+cm.Data["apiVersion"]).Info("Kind.Group/Version Reference")
// Validate the fields
if kind == "" || versionIdx < 1 {
log.Warningf("kind=%v, resourceName=%v, versionIdx=%v", kind, resourceName, versionIdx)
return nil, fmt.Errorf("There is a problem with the apiVersion, kind or resourceName provided")
}
if (resourceName == "" && labelSelector.MatchLabels == nil && labelSelector.MatchExpressions == nil) ||
(resourceName != "" && (labelSelector.MatchExpressions != nil || labelSelector.MatchLabels != nil)) {
log.Warningf("You must choose either resourceName=%v, labelSelector.matchLabels=%v or labelSelect.matchExpressions=%v", resourceName, labelSelector.MatchLabels, labelSelector.MatchExpressions)
return nil, fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator")
}
// Split up the apiVersion
group := cm.Data["apiVersion"][0:versionIdx]
version := cm.Data["apiVersion"][versionIdx+1:]
log.WithField("kind.group.version", kind+"."+group+"/"+version).Debug("decoded Ref")
duckGVR := schema.GroupVersionResource{Group: group, Version: version, Resource: kind}
listOptions := metav1.ListOptions{}
if resourceName == "" {
listOptions.LabelSelector = metav1.FormatLabelSelector(&labelSelector)
log.WithField("listOptions.LabelSelector", listOptions.LabelSelector).Info("selection type")
} else {
listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", resourceName).String()
//metav1.Convert_fields_Selector_To_string(fields.).Sprintf("metadata.name=%s", resourceName)
log.WithField("listOptions.FieldSelector", listOptions.FieldSelector).Info("selection type")
}
duckResources, err := g.dynClient.Resource(duckGVR).Namespace(g.namespace).List(g.ctx, listOptions)
if err != nil {
log.WithField("GVK", duckGVR).Warning("resources were not found")
return nil, err
}
if len(duckResources.Items) == 0 {
log.Warning("no resource found, make sure you clusterDecisionResource is defined correctly")
return nil, fmt.Errorf("no clusterDecisionResources found")
}
// Override the duck type in the status of the resource
statusListKey := "clusters"
matchKey := cm.Data["matchKey"]
if cm.Data["statusListKey"] != "" {
statusListKey = cm.Data["statusListKey"]
}
if matchKey == "" {
log.WithField("matchKey", matchKey).Warning("matchKey not found in " + cm.Name)
return nil, nil
}
res := []map[string]string{}
clusterDecisions := []interface{}{}
// Build the decision slice
for _, duckResource := range duckResources.Items {
log.WithField("duckResourceName", duckResource.GetName()).Debug("found resource")
if duckResource.Object["status"] == nil || len(duckResource.Object["status"].(map[string]interface{})) == 0 {
log.Warningf("clusterDecisionResource: %s, has no status", duckResource.GetName())
continue
}
log.WithField("duckResourceStatus", duckResource.Object["status"]).Debug("found resource")
clusterDecisions = append(clusterDecisions, duckResource.Object["status"].(map[string]interface{})[statusListKey].([]interface{})...)
}
log.Infof("Number of decisions found: %v", len(clusterDecisions))
// Read this outside the loop to improve performance
argoClusters := clustersFromArgoCD.Items
if len(clusterDecisions) > 0 {
for _, cluster := range clusterDecisions {
// generated instance of cluster params
params := map[string]string{}
log.Infof("cluster: %v", cluster)
matchValue := cluster.(map[string]interface{})[matchKey]
if matchValue == nil || matchValue.(string) == "" {
log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]interface{}))
continue
}
strMatchValue := matchValue.(string)
log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD")
found := false
for _, argoCluster := range argoClusters {
if argoCluster.Name == strMatchValue {
log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD")
params["name"] = argoCluster.Name
params["server"] = argoCluster.Server
found = true
break // Stop looking
}
}
if !found {
log.WithField(matchKey, strMatchValue).Warning("unmatched cluster in ArgoCD")
continue
}
for key, value := range cluster.(map[string]interface{}) {
params[key] = value.(string)
}
for key, value := range appSetGenerator.ClusterDecisionResource.Values {
params[fmt.Sprintf("values.%s", key)] = value
}
res = append(res, params)
}
} else {
log.Warningf("clusterDecisionResource status." + statusListKey + " missing")
return nil, nil
}
return res, nil
}
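
The generator above drives everything from a ConfigMap holding apiVersion, kind, statusListKey and matchKey. The sketch below (values borrowed from the tests that follow; not part of the diff) shows how the apiVersion string is split into group and version for the dynamic client, mirroring the versionIdx logic:

// Minimal sketch of the GVR extraction performed by the duck type generator.
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Illustrative ConfigMap data, matching the test fixtures below.
	cmData := map[string]string{
		"apiVersion":    "mallard.io/v1",
		"kind":          "ducks",
		"statusListKey": "decisions",
		"matchKey":      "clusterName",
	}

	versionIdx := strings.Index(cmData["apiVersion"], "/")
	if cmData["kind"] == "" || versionIdx < 1 {
		fmt.Println("invalid apiVersion or kind")
		return
	}

	gvr := schema.GroupVersionResource{
		Group:    cmData["apiVersion"][:versionIdx],
		Version:  cmData["apiVersion"][versionIdx+1:],
		Resource: cmData["kind"],
	}
	fmt.Println(gvr) // mallard.io/v1, Resource=ducks
}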


@@ -1,315 +0,0 @@
package generators
import (
"context"
"fmt"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
dynfake "k8s.io/client-go/dynamic/fake"
kubefake "k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/controller-runtime/pkg/client"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
"testing"
)
const resourceApiVersion = "mallard.io/v1"
const resourceKind = "ducks"
const resourceName = "quak"
func TestGenerateParamsForDuckType(t *testing.T) {
clusters := []client.Object{
&corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "staging-01",
Namespace: "namespace",
Labels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "staging",
"org": "foo",
},
Annotations: map[string]string{
"foo.argoproj.io": "staging",
},
},
Data: map[string][]byte{
"config": []byte("{}"),
"name": []byte("staging-01"),
"server": []byte("https://staging-01.example.com"),
},
Type: corev1.SecretType("Opaque"),
},
&corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "production-01",
Namespace: "namespace",
Labels: map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "production",
"org": "bar",
},
Annotations: map[string]string{
"foo.argoproj.io": "production",
},
},
Data: map[string][]byte{
"config": []byte("{}"),
"name": []byte("production-01"),
"server": []byte("https://production-01.example.com"),
},
Type: corev1.SecretType("Opaque"),
},
}
duckType := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": resourceApiVersion,
"kind": "Duck",
"metadata": map[string]interface{}{
"name": resourceName,
"namespace": "namespace",
"labels": map[string]interface{}{"duck": "all-species"},
},
"status": map[string]interface{}{
"decisions": []interface{}{
map[string]interface{}{
"clusterName": "staging-01",
},
map[string]interface{}{
"clusterName": "production-01",
},
},
},
},
}
duckTypeProdOnly := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": resourceApiVersion,
"kind": "Duck",
"metadata": map[string]interface{}{
"name": resourceName,
"namespace": "namespace",
"labels": map[string]interface{}{"duck": "spotted"},
},
"status": map[string]interface{}{
"decisions": []interface{}{
map[string]interface{}{
"clusterName": "production-01",
},
},
},
},
}
duckTypeEmpty := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": resourceApiVersion,
"kind": "Duck",
"metadata": map[string]interface{}{
"name": resourceName,
"namespace": "namespace",
"labels": map[string]interface{}{"duck": "canvasback"},
},
"status": map[string]interface{}{},
},
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "my-configmap",
Namespace: "namespace",
},
Data: map[string]string{
"apiVersion": resourceApiVersion,
"kind": resourceKind,
"statusListKey": "decisions",
"matchKey": "clusterName",
},
}
testCases := []struct {
name string
configMapRef string
resourceName string
labelSelector metav1.LabelSelector
resource *unstructured.Unstructured
values map[string]string
expected []map[string]string
expectedError error
}{
{
name: "no duck resource",
resourceName: "",
resource: duckType,
values: nil,
expected: []map[string]string{},
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
},
/*** This does not work with the FAKE runtime client, fieldSelectors are broken.
{
name: "invalid name for duck resource",
resourceName: resourceName + "-different",
resource: duckType,
values: nil,
expected: []map[string]string{},
expectedError: fmt.Errorf("duck.mallard.io \"quak\" not found"),
},
***/
{
name: "duck type generator resourceName",
resourceName: resourceName,
resource: duckType,
values: nil,
expected: []map[string]string{
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
},
expectedError: nil,
},
{
name: "production-only",
resourceName: resourceName,
resource: duckTypeProdOnly,
values: map[string]string{
"foo": "bar",
},
expected: []map[string]string{
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
},
expectedError: nil,
},
{
name: "duck type empty status",
resourceName: resourceName,
resource: duckTypeEmpty,
values: nil,
expected: nil,
expectedError: nil,
},
{
name: "duck type empty status labelSelector.matchLabels",
resourceName: "",
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "canvasback"}},
resource: duckTypeEmpty,
values: nil,
expected: nil,
expectedError: nil,
},
{
name: "duck type generator labelSelector.matchLabels",
resourceName: "",
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "all-species"}},
resource: duckType,
values: nil,
expected: []map[string]string{
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
},
expectedError: nil,
},
{
name: "production-only labelSelector.matchLabels",
resourceName: "",
resource: duckTypeProdOnly,
labelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"duck": "spotted"}},
values: map[string]string{
"foo": "bar",
},
expected: []map[string]string{
{"clusterName": "production-01", "values.foo": "bar", "name": "production-01", "server": "https://production-01.example.com"},
},
expectedError: nil,
},
{
name: "duck type generator labelSelector.matchExpressions",
resourceName: "",
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "duck",
Operator: "In",
Values: []string{"all-species", "marbled"},
},
}},
resource: duckType,
values: nil,
expected: []map[string]string{
{"clusterName": "production-01", "name": "production-01", "server": "https://production-01.example.com"},
{"clusterName": "staging-01", "name": "staging-01", "server": "https://staging-01.example.com"},
},
expectedError: nil,
},
{
name: "duck type generator resourceName and labelSelector.matchExpressions",
resourceName: resourceName,
labelSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "duck",
Operator: "In",
Values: []string{"all-species", "marbled"},
},
}},
resource: duckType,
values: nil,
expected: nil,
expectedError: fmt.Errorf("There is a problem with the definition of the ClusterDecisionResource generator"),
},
}
// convert []client.Object to []runtime.Object, for use by kubefake package
runtimeClusters := []runtime.Object{}
for _, clientCluster := range clusters {
runtimeClusters = append(runtimeClusters, clientCluster)
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
appClientset := kubefake.NewSimpleClientset(append(runtimeClusters, configMap)...)
gvrToListKind := map[schema.GroupVersionResource]string{{
Group: "mallard.io",
Version: "v1",
Resource: "ducks",
}: "DuckList"}
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
var duckTypeGenerator = NewDuckTypeGenerator(context.Background(), fakeDynClient, appClientset, "namespace")
got, err := duckTypeGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
ClusterDecisionResource: &argoprojiov1alpha1.DuckTypeGenerator{
ConfigMapRef: "my-configmap",
Name: testCase.resourceName,
LabelSelector: testCase.labelSelector,
Values: testCase.values,
},
}, nil)
if testCase.expectedError != nil {
assert.EqualError(t, err, testCase.expectedError.Error())
} else {
assert.NoError(t, err)
assert.ElementsMatch(t, testCase.expected, got)
}
})
}
}


@@ -1,83 +0,0 @@
package generators
import (
"reflect"
"github.com/imdario/mergo"
log "github.com/sirupsen/logrus"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
type TransformResult struct {
Params []map[string]string
Template argoprojiov1alpha1.ApplicationSetTemplate
}
// Transform converts a requested generator into a list of TransformResults, each holding the generated paramSets and the merged template
func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet) ([]TransformResult, error) {
res := []TransformResult{}
var firstError error
generators := GetRelevantGenerators(&requestedGenerator, allGenerators)
for _, g := range generators {
// we call mergeGeneratorTemplate first because GenerateParams might be more costly so we want to fail fast if there is an error
mergedTemplate, err := mergeGeneratorTemplate(g, &requestedGenerator, baseTemplate)
if err != nil {
log.WithError(err).WithField("generator", g).
Error("error generating params")
if firstError == nil {
firstError = err
}
continue
}
params, err := g.GenerateParams(&requestedGenerator, appSet)
if err != nil {
log.WithError(err).WithField("generator", g).
Error("error generating params")
if firstError == nil {
firstError = err
}
continue
}
res = append(res, TransformResult{
Params: params,
Template: mergedTemplate,
})
}
return res, firstError
}
func GetRelevantGenerators(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, generators map[string]Generator) []Generator {
var res []Generator
v := reflect.Indirect(reflect.ValueOf(requestedGenerator))
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
if !field.CanInterface() {
continue
}
if !reflect.ValueOf(field.Interface()).IsNil() {
res = append(res, generators[v.Type().Field(i).Name])
}
}
return res
}
func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) (argoprojiov1alpha1.ApplicationSetTemplate, error) {
// Make a copy of the value from `GetTemplate()` before merge, rather than copying directly into
// the provided parameter (which will touch the original resource object returned by client-go)
dest := g.GetTemplate(requestedGenerator).DeepCopy()
err := mergo.Merge(dest, applicationSetTemplate)
return *dest, err
}
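
GetRelevantGenerators above relies on reflection: any non-nil pointer field on the generator spec selects the matching entry from the generator map, keyed by the field name. A minimal sketch of that scan using a stand-in struct rather than the real ApplicationSetGenerator type (not part of the diff):

// Sketch of the non-nil-pointer-field scan used by GetRelevantGenerators.
package main

import (
	"fmt"
	"reflect"
)

// fakeGeneratorSpec is a stand-in for the real generator spec type.
type fakeGeneratorSpec struct {
	Clusters *struct{}
	Git      *struct{}
	List     *struct{}
}

func relevantNames(spec *fakeGeneratorSpec) []string {
	var names []string
	v := reflect.Indirect(reflect.ValueOf(spec))
	for i := 0; i < v.NumField(); i++ {
		field := v.Field(i)
		if !field.CanInterface() {
			continue
		}
		// Only fields that are actually set (non-nil pointers) are relevant.
		if !reflect.ValueOf(field.Interface()).IsNil() {
			names = append(names, v.Type().Field(i).Name)
		}
	}
	return names
}

func main() {
	spec := &fakeGeneratorSpec{Clusters: &struct{}{}}
	fmt.Println(relevantNames(spec)) // [Clusters]
}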

Some files were not shown because too many files have changed in this diff.