Compare commits

..

14 Commits

Author | SHA1 | Message | Date
Alexander Matyushentsev | 1b0e3e7b80 | Update manifests to v0.9.2 | 2018-09-28 09:36:21 -07:00
Jesse Suen | 2faf45af94 | Add version check during release to ensure compiled version is accurate (#646) | 2018-09-27 18:57:44 -07:00
Jesse Suen | b8c4436870 | Update generated files | 2018-09-27 18:09:50 -07:00
Jesse Suen | f8b2576f19 | Fix issue where argocd-server logged credentials in plain text during repo add (issue #653) | 2018-09-27 18:04:10 -07:00
Jesse Suen | 61f4513e52 | Switch to go-git for all remote git interactions including auth (issue #651) | 2018-09-27 18:03:56 -07:00
Jesse Suen | 4d210c887b | Do not append .git extension during normalization for Azure hosted git (issue #643) (#645) | 2018-09-27 18:03:33 -07:00
Alexander Matyushentsev | ab6b2969ac | Issue #650 - Temporary ignore service catalog resources (#661) | 2018-09-27 18:00:24 -07:00
Andrew Merenbach | 447dd30768 | Update generated files (#660) | 2018-09-27 13:53:56 -07:00
dthomson25 | ab98589e1a | Normalize policies by always adding space after comma (#659) | 2018-09-27 13:30:49 -07:00
Stephen Haynes | 2fd6c11338 | update to kustomize 1.0.8 (#644) | 2018-09-26 21:28:08 -07:00
Alexander Matyushentsev | b90102bc68 | Update argocd VERSION file | 2018-09-24 15:28:17 -07:00
Alexander Matyushentsev | 1aa7146d3e | Update manifests to v0.9.1 | 2018-09-24 14:26:20 -07:00
Alexander Matyushentsev | b366977265 | Issue #639 - Repo server unable to execute ls-remote for private repos (#640) | 2018-09-24 14:22:23 -07:00
Alexander Matyushentsev | b6439dc545 | Update manifests to v0.9.0 | 2018-09-24 13:16:09 -07:00
2510 changed files with 316130 additions and 368980 deletions

.argo-ci/ci.yaml

@@ -0,0 +1,107 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: argo-cd-ci-
spec:
entrypoint: argo-cd-ci
arguments:
parameters:
- name: revision
value: master
- name: repo
value: https://github.com/argoproj/argo-cd.git
templates:
- name: argo-cd-ci
steps:
- - name: build
template: ci-dind
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- make controller-image server-image repo-server-image
- name: test
template: ci-builder
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- dep ensure && make cli lint
- name: test-coverage
template: ci-builder
arguments:
parameters:
- name: cmd
value: "dep ensure && go get github.com/mattn/goveralls && make test-coverage"
- name: test-e2e
template: ci-builder
arguments:
parameters:
- name: cmd
value: "dep ensure && make test-e2e"
- name: ci-builder
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["mkfifo pipe; tee /tmp/logs.txt < pipe & {{inputs.parameters.cmd}} > pipe"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: COVERALLS_TOKEN
valueFrom:
secretKeyRef:
name: coverall-token
key: coverall-token
resources:
requests:
memory: 1024Mi
cpu: 200m
outputs:
artifacts:
- name: logs
path: /tmp/logs.txt
- name: ci-dind
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["mkfifo pipe; tee /tmp/logs.txt < pipe & until docker ps; do sleep 3; done && {{inputs.parameters.cmd}} > pipe"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: DOCKER_HOST
value: 127.0.0.1
resources:
requests:
memory: 1024Mi
cpu: 200m
sidecars:
- name: dind
image: docker:17.10-dind
securityContext:
privileged: true
mirrorVolumeMounts: true
outputs:
artifacts:
- name: logs
path: /tmp/logs.txt
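For local experimentation, this workflow can also be submitted by hand with the Argo CLI; a minimal sketch, assuming the `argo` CLI is installed and the cluster has the Workflow CRD registered (the parameter names come from `spec.arguments` above; the branch name is a placeholder):

```shell
# Submit the CI workflow manually, overriding the default parameters.
argo submit .argo-ci/ci.yaml \
  -p revision=my-feature-branch \
  -p repo=https://github.com/argoproj/argo-cd.git \
  --watch
```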


@@ -1,18 +0,0 @@
ignore:
- "**/*.pb.go"
- "**/*.pb.gw.go"
- "**/*generated.go"
- "**/*generated.deepcopy.go"
- "**/*_test.go"
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
- "test/.*"
coverage:
status:
# we've found this not to be useful
patch: off
project:
default:
# allow test coverage to drop by 2%, assume that it's typically due to CI problems
threshold: 2


@@ -1,13 +1,4 @@
# Prevent vendor directory from being copied to ensure we are not not pulling unexpected cruft from
# a user's workspace, and are only building off of what is locked by dep.
.vscode/
.idea/
.DS_Store
vendor/
dist/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
ui/node_modules/
vendor
dist


@@ -1,43 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'bug'
assignees: ''
---
<!-- If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a question in argocd slack [channel](https://argoproj.github.io/community/join-slack). -->
Checklist:
* [ ] I've searched in the docs and FAQ for my answer: https://bit.ly/argocd-faq.
* [ ] I've included steps to reproduce the bug.
* [ ] I've pasted the output of `argocd version`.
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
**To Reproduce**
<!-- A list of the steps required to reproduce the issue. Best of all, give us the URL to a repository that exhibits this issue. -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots**
<!-- If applicable, add screenshots to help explain your problem. -->
**Version**
```shell
Paste the output from `argocd version` here.
```
**Logs**
```
Paste any relevant application logs here.
```


@@ -1,12 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Have you read the docs?
url: https://argo-cd.readthedocs.io/
about: Much help can be found in the docs
- name: Ask a question
url: https://github.com/argoproj/argo-cd/discussions/new
about: Ask a question or start a discussion about Argo CD
- name: Chat on Slack
url: https://argoproj.github.io/community/join-slack
about: Maybe chatting with the community can help


@@ -1,18 +0,0 @@
---
name: Enhancement proposal
about: Propose an enhancement for this project
title: ''
labels: 'enhancement'
assignees: ''
---
# Summary
What change you think needs making.
# Motivation
Please give examples of your use case, e.g. when would you use this.
# Proposal
How do you think this should be implemented?


@@ -1 +0,0 @@
# See https://github.com/probot/no-response


@@ -1,17 +0,0 @@
Note on DCO:
If the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.
Checklist:
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
* [ ] Does this PR require documentation updates?
* [ ] I've updated documentation as required by this PR.
* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/tree/master/community#contributing-to-argo)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).

.github/stale.yml

@@ -1,4 +0,0 @@
# See https://github.com/probot/stale
# See https://github.com/probot/stale
exemptLabels:
- backlog


@@ -1,468 +0,0 @@
name: Integration tests
on:
push:
branches:
- 'master'
- 'release-*'
- '!release-1.4'
- '!release-1.5'
pull_request:
branches:
- 'master'
env:
# Golang version to use across CI steps
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
check-go:
name: Ensure Go modules synchronicity
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
run: |
go mod download
- name: Check for tidyness of go.mod and go.sum
run: |
go mod tidy
git diff --exit-code -- .
build-go:
name: Build & cache Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Download all Go modules
run: |
go mod download
- name: Compile all packages
run: make build-local
lint-go:
permissions:
contents: read # for actions/checkout to fetch code
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
with:
version: v1.45.2
args: --timeout 10m --exclude SA5011 --verbose
test-go:
name: Run unit tests for Go packages
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
run: |
sudo apt-get install git -y
- name: Switch to temporal branch so we re-attach head
run: |
git switch -c temporal-pr-branch
git status
- name: Fetch complete history for blame information
run: |
git fetch --prune --no-tags --depth=1 origin +refs/heads/*:refs/remotes/origin/*
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results
path: test-results/
test-go-race:
name: Run unit tests with -race, for Go packages
runs-on: ubuntu-latest
needs:
- build-go
env:
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
run: |
sudo apt-get install git -y
- name: Switch to temporal branch so we re-attach head
run: |
git switch -c temporal-pr-branch
git status
- name: Fetch complete history for blame information
run: |
git fetch --prune --no-tags --depth=1 origin +refs/heads/*:refs/remotes/origin/*
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: race-results
path: test-results/
codegen:
name: Check changes to generated code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
run: |
mkdir -p ~/go/src/github.com/argoproj
cp -a ../argo-cd ~/go/src/github.com/argoproj
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Download & vendor dependencies
run: |
# We need to vendor go modules for codegen yet
go mod download
go mod vendor -v
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Install toolchain for codegen
run: |
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Run codegen
run: |
set -x
export GOPATH=$(go env GOPATH)
git checkout -- go.mod go.sum
make codegen-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Check nothing has changed
run: |
set -xo pipefail
git diff --exit-code -- . ':!go.sum' ':!go.mod' ':!assets/swagger.json' | tee codegen.patch
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
build-ui:
name: Build, test & lint UI code
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup NodeJS
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
- name: Install node dependencies
run: |
cd ui && yarn install --frozen-lockfile --ignore-optional --non-interactive
- name: Build UI code
run: |
yarn test
yarn build
env:
NODE_ENV: production
NODE_ONLINE_ENV: online
HOST_ARCH: amd64
working-directory: ui/
- name: Run ESLint
run: yarn lint
working-directory: ui/
analyze:
name: Process & analyze test artifacts
runs-on: ubuntu-latest
needs:
- test-go
- build-ui
env:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
- name: Remove other node_modules directory
run: |
rm -rf ui/node_modules/argo-ui/node_modules
- name: Create test-results directory
run: |
mkdir -p test-results
- name: Get code coverage artifiact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
SCANNER_VERSION: 4.2.0.1873
SCANNER_PATH: /tmp/cache/scanner
OS: linux
run: |
# We do not use the provided action, because it does contain an old
# version of the scanner, and also takes time to build.
set -e
mkdir -p ${SCANNER_PATH}
export SONAR_USER_HOME=${SCANNER_PATH}/.sonar
if [[ ! -x "${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner" ]]; then
curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip
unzip -qq -o sonar-scanner-cli-${SCANNER_VERSION}-${OS}.zip -d ${SCANNER_PATH}
fi
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
chmod +x ${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/jre/bin/java
# Explicitly set NODE_MODULES
export NODE_MODULES=${PWD}/ui/node_modules
export NODE_PATH=${PWD}/ui/node_modules
${SCANNER_PATH}/sonar-scanner-${SCANNER_VERSION}-${OS}/bin/sonar-scanner
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
runs-on: ubuntu-latest
strategy:
matrix:
k3s-version: [v1.23.3, v1.22.6, v1.21.2]
needs:
- build-go
env:
GOPATH: /home/runner/go
ARGOCD_FAKE_IN_CLUSTER: "true"
ARGOCD_SSH_DATA_PATH: "/tmp/argo-e2e/app/config/ssh"
ARGOCD_TLS_DATA_PATH: "/tmp/argo-e2e/app/config/tls"
ARGOCD_E2E_SSH_KNOWN_HOSTS: "../fixture/certs/ssh_known_hosts"
ARGOCD_E2E_K3S: "true"
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
ARGOCD_SERVER: "127.0.0.1:8088"
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
# ubuntu-22.04 comes with kubectl, but the version is not pinned. The version as of 2022-12-05 is 1.26.0 which
# breaks the `TestNamespacedResourceDiffing` e2e test. So we'll pin to 1.25 and then fix the underlying issue.
- name: Install kubectl
run: |
rm /usr/local/bin/kubectl
curl -LO https://dl.k8s.io/release/v1.25.4/bin/linux/amd64/kubectl
mv kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
- name: Install K3S
env:
INSTALL_K3S_VERSION: ${{ matrix.k3s-version }}+k3s1
run: |
set -x
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
sudo mkdir -p $HOME/.kube && sudo chown -R runner $HOME/.kube
sudo k3s kubectl config view --raw > $HOME/.kube/config
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Add ./dist to PATH
run: |
echo "$(pwd)/dist" >> $GITHUB_PATH
- name: Download Go dependencies
run: |
go mod download
go install github.com/mattn/goreman@latest
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.7-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
chown runner dist
- name: Run E2E server and wait for it being available
timeout-minutes: 30
run: |
set -x
# Something is weird in GH runners -- there's a phantom listener for
# port 8080 which is not visible in netstat -tulpen, but still there
# with a HTTP listener. We have API server listening on port 8088
# instead.
make start-e2e-local 2>&1 | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g" > /tmp/e2e-server.log &
count=1
until curl -f http://127.0.0.1:8088/healthz; do
sleep 10;
if test $count -ge 180; then
echo "Timeout"
exit 1
fi
count=$((count+1))
done
- name: Run E2E testsuite
run: |
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
if: ${{ failure() }}
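For reference, the core of this job can be reproduced outside of GitHub Actions; a rough sketch using the same make targets, port, and health check as above (assumes a local k3s or similar cluster and the required tooling already installed):

```shell
# Mirror the test-e2e job: start the E2E server in the background,
# wait for the API server on port 8088, then run the suite.
export ARGOCD_E2E_APISERVER_PORT=8088
export ARGOCD_SERVER=127.0.0.1:8088
make start-e2e-local > /tmp/e2e-server.log 2>&1 &
until curl -f http://127.0.0.1:8088/healthz; do sleep 10; done
make test-e2e-local
```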


@@ -1,58 +0,0 @@
name: "Code scanning - action"
on:
push:
# Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot
branches-ignore:
- 'dependabot/**'
pull_request:
schedule:
- cron: '0 19 * * 0'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
CodeQL-Build:
permissions:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd'
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33


@@ -1,153 +0,0 @@
name: Image
on:
push:
branches:
- master
pull_request:
branches:
- master
types: [ labeled, unlabeled, opened, synchronize, reopened ]
env:
GOLANG_VERSION: '1.18'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
path: src/github.com/argoproj/argo-cd
# get image tag
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
working-directory: ./src/github.com/argoproj/argo-cd
id: image
# login
- run: |
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
# build
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Setup cache for argocd-ui docker layer
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-single-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-single-buildx
- name: Build cache for argocd-ui stage
uses: docker/build-push-action@v2
with:
context: ./src/github.com/argoproj/argo-cd
target: argocd-ui
push: false
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
- name: Run non-container Snyk scans
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
npm install -g snyk
# Run with high threshold to fail build.
snyk test --org=argoproj --all-projects --exclude=docs,site --severity-threshold=high --policy-path=.snyk
snyk iac test manifests/install.yaml --org=argoproj --severity-threshold=high --policy-path=.snyk
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
then
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
--cache-from "type=local,src=/tmp/.buildx-cache" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd
- name: Run container Snyk scan
if: github.event_name == 'push'
working-directory: ./src/github.com/argoproj/argo-cd
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
run: |
snyk container test quay.io/argoproj/argocd:latest --org=argoproj --file=Dockerfile --severity-threshold=high
# Temp fix
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Clean up build cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'test-arm-image')
# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV
- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ github.event_name == 'push' }}
# deploy
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
if: github.event_name == 'push'
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
if: github.event_name == 'push'
working-directory: argoproj-deployments/argocd
# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-GitHub-Package-Registry/m-p/41202/thread-id/9811
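Because the pushed image is signed with cosign and the public key is echoed in the job output, consumers can check the signature themselves; a hedged sketch (the key file name is illustrative, not an artifact this workflow publishes under that name):

```shell
# Verify the signature on the latest image with the published cosign public key.
cosign verify --key argocd-cosign.pub quay.io/argoproj/argocd:latest
```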


@@ -1,337 +0,0 @@
name: Create ArgoCD release
on:
push:
tags:
- "release-v*"
- "!release-v1.5*"
- "!release-v1.4*"
- "!release-v1.3*"
- "!release-v1.2*"
- "!release-v1.1*"
- "!release-v1.0*"
- "!release-v0*"
env:
GOLANG_VERSION: '1.18'
permissions:
contents: read
jobs:
prepare-release:
permissions:
contents: write # To push changes to release branch
name: Perform automatic release on trigger ${{ github.ref }}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-latest
env:
# The name of the tag as supplied by the GitHub event
SOURCE_TAG: ${{ github.ref }}
# The image namespace where Docker image will be published to
IMAGE_NAMESPACE: quay.io/argoproj
# Whether to create & push image and release assets
DRY_RUN: false
# Whether a draft release should be created, instead of public one
DRAFT_RELEASE: false
# Whether to update homebrew with this release as well
# Set RELEASE_HOMEBREW_TOKEN secret in repository for this to work - needs
# access to public repositories
UPDATE_HOMEBREW: false
# Name of the GitHub user for Git config
GIT_USERNAME: argo-bot
# E-Mail of the GitHub user for Git config
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Check if the published tag is well formed and setup vars
run: |
set -xue
# Target version must match major.minor.patch and optional -rcX suffix
# where X must be a number.
TARGET_VERSION=${SOURCE_TAG#*release-v}
if ! echo "${TARGET_VERSION}" | egrep '^[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)*$'; then
echo "::error::Target version '${TARGET_VERSION}' is malformed, refusing to continue." >&2
exit 1
fi
# Target branch is the release branch we're going to operate on
# Its name is 'release-<major>.<minor>'
TARGET_BRANCH="release-${TARGET_VERSION%\.[0-9]*}"
# The release tag is the source tag, minus the release- prefix
RELEASE_TAG="${SOURCE_TAG#*release-}"
# Whether this is a pre-release (indicated by -rc suffix)
PRE_RELEASE=false
if echo "${RELEASE_TAG}" | egrep -- '-rc[0-9]+$'; then
PRE_RELEASE=true
fi
# We must not have a release trigger within the same release branch,
# because that means a release for this branch is already running.
if git tag -l | grep "release-v${TARGET_VERSION%\.[0-9]*}" | grep -v "release-v${TARGET_VERSION}"; then
echo "::error::Another release for branch ${TARGET_BRANCH} is currently in progress."
exit 1
fi
# Ensure that release do not yet exist
if git rev-parse ${RELEASE_TAG}; then
echo "::error::Release tag ${RELEASE_TAG} already exists in repository. Refusing to continue."
exit 1
fi
# Make the variables available in follow-up steps
echo "TARGET_VERSION=${TARGET_VERSION}" >> $GITHUB_ENV
echo "TARGET_BRANCH=${TARGET_BRANCH}" >> $GITHUB_ENV
echo "RELEASE_TAG=${RELEASE_TAG}" >> $GITHUB_ENV
echo "PRE_RELEASE=${PRE_RELEASE}" >> $GITHUB_ENV
- name: Check if our release tag has a correct annotation
run: |
set -ue
# Fetch all tag information as well
git fetch --prune --tags --force
echo "=========== BEGIN COMMIT MESSAGE ============="
git show ${SOURCE_TAG}
echo "============ END COMMIT MESSAGE =============="
# Quite dirty hack to get the release notes from the annotated tag
# into a temporary file.
RELEASE_NOTES=$(mktemp -p /tmp release-notes.XXXXXX)
prefix=true
begin=false
git show ${SOURCE_TAG} | while read line; do
# Whatever is in commit history for the tag, we only want that
# annotation from our tag. We discard everything else.
if test "$begin" = "false"; then
if echo "$line" | grep -q "tag ${SOURCE_TAG#refs/tags/}"; then begin="true"; fi
continue
fi
if test "$prefix" = "true"; then
if test -z "$line"; then prefix=false; fi
else
if echo "$line" | egrep -q '^commit [0-9a-f]+'; then
break
fi
echo "$line" >> ${RELEASE_NOTES}
fi
done
# For debug purposes
echo "============BEGIN RELEASE NOTES================="
cat ${RELEASE_NOTES}
echo "=============END RELEASE NOTES=================="
# Too short release notes are suspicious. We need at least 100 bytes.
relNoteLen=$(stat -c '%s' $RELEASE_NOTES)
if test $relNoteLen -lt 100; then
echo "::error::No release notes provided in tag annotation (or tag is not annotated)"
exit 1
fi
# Check for magic string '## Quick Start' in head of release notes
if ! head -2 ${RELEASE_NOTES} | grep -iq '## Quick Start'; then
echo "::error::Release notes seem invalid, quick start section not found."
exit 1
fi
# We store path to temporary release notes file for later reading, we
# need it when creating release.
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV
- name: Setup Golang
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Setup Git author information
run: |
set -ue
git config --global user.email "${GIT_EMAIL}"
git config --global user.name "${GIT_USERNAME}"
- name: Checkout corresponding release branch
run: |
set -ue
echo "Switching to release branch '${TARGET_BRANCH}'"
if ! git checkout ${TARGET_BRANCH}; then
echo "::error::Checking out release branch '${TARGET_BRANCH}' for target version '${TARGET_VERSION}' (tagged '${RELEASE_TAG}') failed. Does it exist in repo?"
exit 1
fi
- name: Create VERSION information
run: |
set -ue
echo "Bumping version from $(cat VERSION) to ${TARGET_VERSION}"
echo "${TARGET_VERSION}" > VERSION
git commit -m "Bump version to ${TARGET_VERSION}" VERSION
- name: Generate new set of manifests
run: |
set -ue
make install-codegen-tools-local
# We install kustomize in the dist directory
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
- name: Create the release tag
run: |
set -ue
echo "Creating release ${RELEASE_TAG}"
git tag ${RELEASE_TAG}
- name: Login to docker repositories
env:
DOCKER_USERNAME: ${{ secrets.RELEASE_DOCKERHUB_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_DOCKERHUB_TOKEN }}
QUAY_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Build and push Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
./dist/argocd-linux-amd64 version --client
if: ${{ env.DRY_RUN != 'true' }}
- name: Install cosign
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.1'
- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c
- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV
- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
path: ${{ env.RELEASE_NOTES }}
- name: Push changes to release branch
run: |
set -ue
git push origin ${TARGET_BRANCH}
git push origin ${RELEASE_TAG}
- name: Dry run GitHub release
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release
with:
tag_name: ${{ env.RELEASE_TAG }}
release_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }}
if: ${{ env.DRY_RUN == 'true' }}
- name: Generate SBOM (spdx)
id: spdx-builder
env:
# defines the spdx/spdx-sbom-generator version to use.
SPDX_GEN_VERSION: v0.0.13
# defines the sigs.k8s.io/bom version to use.
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
yarn install --cwd ./ui
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
go install sigs.k8s.io/bom/cmd/bom@$SIGS_BOM_VERSION
# Generate SPDX for project dependencies analyzing package managers
for folder in $(echo $PROJECT_FOLDERS | sed "s/,/ /g")
do
generator -p $folder -o /tmp
done
# Generate SPDX for binaries analyzing the docker image
if [[ ! -z $DOCKER_IMAGE ]]; then
bom generate -o /tmp/bom-docker-image.spdx -i $DOCKER_IMAGE
fi
cd /tmp && tar -zcf sbom.tar.gz *.spdx
if: ${{ env.DRY_RUN != 'true' }}
- name: Sign sbom
run: |
cosign sign-blob --key env://COSIGN_PRIVATE_KEY /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
env:
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
if: ${{ env.DRY_RUN != 'true' }}
- name: Create GitHub release
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: ${{ env.RELEASE_TAG }}
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
files: |
dist/argocd-*
/tmp/sbom.tar.gz
/tmp/sbom.tar.gz.sig
if: ${{ env.DRY_RUN != 'true' }}
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd
if: ${{ env.HOMEBREW_TOKEN != '' && env.UPDATE_HOMEBREW == 'true' && env.PRE_RELEASE != 'true' }}
- name: Delete original request tag from repository
run: |
set -ue
git push --delete origin ${SOURCE_TAG}
if: ${{ always() }}
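Putting the checks above together, a release is cut by pushing an annotated `release-v*` tag whose annotation becomes the release notes; a sketch of what that could look like (the version and notes file are placeholders):

```shell
# Tag name must match release-v<major>.<minor>.<patch>[-rcN]; the annotation
# must be at least 100 bytes and start with a '## Quick Start' section.
git tag -a release-v2.6.0 -F release-notes.md
git push origin release-v2.6.0
```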

.gitignore

@@ -2,24 +2,9 @@
.idea/
.DS_Store
vendor/
dist/*
ui/dist/app/*
!ui/dist/app/gitkeep
site/
dist/
*.iml
# delve debug binaries
cmd/**/debug
debug.test
coverage.out
test-results
.scannerwork
.scratch
node_modules/
.kube/
./test/cmp/*.sock
# ignore built binaries
cmd/argocd/argocd
cmd/argocd-application-controller/argocd-application-controller
cmd/argocd-repo-server/argocd-repo-server
cmd/argocd-server/argocd-server

.gitpod.Dockerfile

@@ -1,17 +0,0 @@
FROM gitpod/workspace-full
USER root
RUN curl -o /usr/local/bin/kubectl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
chmod +x /usr/local/bin/kubectl
RUN curl -L https://go.kubebuilder.io/dl/2.3.1/$(go env GOOS)/$(go env GOARCH) | \
tar -xz -C /tmp/ && mv /tmp/kubebuilder_2.3.1_$(go env GOOS)_$(go env GOARCH) /usr/local/kubebuilder
RUN apt-get install redis-server -y
RUN go install github.com/mattn/goreman@latest
USER gitpod
ENV ARGOCD_REDIS_LOCAL=true
ENV KUBECONFIG=/tmp/kubeconfig


@@ -1,6 +0,0 @@
image:
file: .gitpod.Dockerfile
tasks:
- init: make mod-download-local dep-ui-local && GO111MODULE=off go install github.com/mattn/goreman@latest
command: make start-test-k8s


@@ -1,7 +0,0 @@
version: 2
formats: all
mkdocs:
fail_on_warning: false
python:
install:
- requirements: docs/requirements.txt

.snyk

@@ -1,22 +0,0 @@
# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
version: v1.22.1
# ignores vulnerabilities until expiry date; change duration by modifying expiry date
ignore:
SNYK-JS-ANSIREGEX-1583908:
- '*':
reason: >-
Code is only run client-side in the swagger-ui endpoint. No risk of
server-side DoS.
SNYK-CC-K8S-44:
- 'manifests/core-install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
- 'manifests/install.yaml > *':
reason: >-
Argo CD needs wide permissions to manage resources.
SNYK-JS-MOMENT-2440688:
- '*':
reason: >-
Code is only run client-side. No risk of directory traversal.
patch: {}
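These ignores only take effect when a scan is pointed at the policy file, which is what the image workflow above does; a minimal sketch using the same flags as CI:

```shell
# Run a Snyk scan that honors the ignore rules declared in .snyk.
snyk test --all-projects --severity-threshold=high --policy-path=.snyk
```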

(File diff suppressed because it is too large)

CONTRIBUTING.md

@@ -0,0 +1,77 @@
## Requirements
Make sure you have following tools installed
* [docker](https://docs.docker.com/install/#supported-platforms)
* [golang](https://golang.org/)
* [dep](https://github.com/golang/dep)
* [protobuf](https://developers.google.com/protocol-buffers/)
* [ksonnet](https://github.com/ksonnet/ksonnet#install)
* [helm](https://github.com/helm/helm/releases)
* [kustomize](https://github.com/kubernetes-sigs/kustomize/releases)
* [go-swagger](https://github.com/go-swagger/go-swagger/blob/master/docs/install.md)
* [jq](https://stedolan.github.io/jq/)
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
```
$ brew tap go-swagger/go-swagger
$ brew install go dep protobuf kubectl ksonnet/tap/ks kubernetes-helm jq go-swagger
$ go get -u github.com/golang/protobuf/protoc-gen-go
$ go get -u github.com/go-swagger/go-swagger/cmd/swagger
$ go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
$ go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
```
Nice to have [gometalinter](https://github.com/alecthomas/gometalinter) and [goreman](https://github.com/mattn/goreman):
```
$ go get -u gopkg.in/alecthomas/gometalinter.v2 github.com/mattn/goreman && gometalinter.v2 --install
```
## Building
```
$ go get -u github.com/argoproj/argo-cd
$ dep ensure
$ make
```
NOTE: The make command can take a while, and we recommend building the specific component you are working on
* `make cli` - Make the argocd CLI tool
* `make server` - Make the API/repo/controller server
* `make codegen` - Builds protobuf and swagger files
* `make argocd-util` - Make the administrator's utility, used for certain tasks such as import/export
## Generating ArgoCD manifests for a specific image repository/tag
During development, the `update-manifests.sh` script, can be used to conveniently regenerate the
ArgoCD installation manifests with a customized image namespace and tag. This enables developers
to easily apply manifests which are using the images that they pushed into their personal container
repository.
```
$ IMAGE_NAMESPACE=jessesuen IMAGE_TAG=latest ./hack/update-manifests.sh
$ kubectl apply -n argocd -f ./manifests/install.yaml
```
## Running locally
You need to have access to kubernetes cluster (including [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) or [docker edge](https://docs.docker.com/docker-for-mac/install/) ) in order to run Argo CD on your laptop:
* install kubectl: `brew install kubectl`
* make sure `kubectl` is connected to your cluster (e.g. `kubectl get pods` should work).
* install application CRD using following command:
```
$ kubectl create -f install/manifests/01_application-crd.yaml
```
* start Argo CD services using [goreman](https://github.com/mattn/goreman):
```
$ goreman start
```
## Troubleshooting
* Ensure argocd is installed: ./dist/argocd install
* Ensure you're logged in: ./dist/argocd login --username admin --password <whatever password you set at install> localhost:8080
* Ensure that roles are configured: kubectl create -f install/manifests/02c_argocd-rbac-cm.yaml
* Ensure minikube is running: minikube stop && minikube start
* Ensure Argo CD is aware of minikube: ./dist/argocd cluster add minikube


@@ -1,131 +1,136 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:22.04
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.18 AS builder
FROM golang:1.10.3 as builder
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
RUN apt-get update && apt-get install --no-install-recommends -y \
openssh-server \
nginx \
unzip \
fcgiwrap \
RUN apt-get update && apt-get install -y \
git \
git-lfs \
make \
wget \
gcc \
sudo \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
WORKDIR /tmp
COPY hack/install.sh hack/tool-versions.sh ./
COPY hack/installers installers
# Install docker
ENV DOCKER_VERSION=18.06.0
RUN curl -O https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}-ce.tgz && \
tar -xzf docker-${DOCKER_VERSION}-ce.tgz && \
mv docker/docker /usr/local/bin/docker && \
rm -rf ./docker
# Install dep
ENV DEP_VERSION=0.5.0
RUN wget https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep
# Install gometalinter
RUN curl -sLo- https://github.com/alecthomas/gometalinter/releases/download/v2.0.5/gometalinter-2.0.5-linux-amd64.tar.gz | \
tar -xzC "$GOPATH/bin" --exclude COPYING --exclude README.md --strip-components 1 -f- && \
ln -s $GOPATH/bin/gometalinter $GOPATH/bin/gometalinter.v2
# Install packr
ENV PACKR_VERSION=1.13.2
RUN wget https://github.com/gobuffalo/packr/releases/download/v${PACKR_VERSION}/packr_${PACKR_VERSION}_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# Install kubectl
RUN curl -L -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /usr/local/bin/kubectl
# Install ksonnet
# NOTE: we frequently switch between tip of master ksonnet vs. official builds. Comment/uncomment
# the corresponding section to switch between the two options:
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /usr/local/bin/ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.11.0
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks
# Install helm
ENV HELM_VERSION=2.9.1
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar -C /tmp/ -xf helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv /tmp/linux-amd64/helm /usr/local/bin/helm
# Install kustomize
ENV KUSTOMIZE_VERSION=1.0.8
RUN curl -L -o /usr/local/bin/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/kustomize
ENV AWS_IAM_AUTHENTICATOR_VERSION=0.3.0
RUN curl -L -o /usr/local/bin/aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.3.0/heptio-authenticator-aws_${AWS_IAM_AUTHENTICATOR_VERSION}_linux_amd64 && \
chmod +x /usr/local/bin/aws-iam-authenticator
RUN ./install.sh helm-linux && \
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
####################################################################################################
# Argo CD Base - used as the base for both the release and dev argocd images
# ArgoCD Build stage which performs the actual build of ArgoCD binaries
####################################################################################################
FROM $BASE_IMAGE AS argocd-base
FROM golang:1.10.3 as argocd-build
USER root
COPY --from=builder /usr/local/bin/dep /usr/local/bin/dep
COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr
ENV DEBIAN_FRONTEND=noninteractive
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN groupadd -g 999 argocd && \
useradd -r -u 999 -g argocd argocd && \
mkdir -p /home/argocd && \
chown argocd:0 /home/argocd && \
chmod g=u /home/argocd && \
apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y \
git git-lfs tini gpg tzdata && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY hack/gpg-wrapper.sh /usr/local/bin/gpg-wrapper.sh
COPY hack/git-verify-wrapper.sh /usr/local/bin/git-verify-wrapper.sh
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
# keep uid_entrypoint.sh for backward compatibility
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
# support for mounting configuration from a configmap
WORKDIR /app/config/ssh
RUN touch ssh_known_hosts && \
ln -s /app/config/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts
WORKDIR /app/config
RUN mkdir -p tls && \
mkdir -p gpg/source && \
mkdir -p gpg/keys && \
chown argocd gpg/keys && \
chmod 0700 gpg/keys
ENV USER=argocd
USER 999
WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:12.18.4 AS argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
RUN yarn install --network-timeout 200000 && \
yarn cache clean
COPY ["ui/", "."]
ARG ARGO_VERSION=latest
ENV ARGO_VERSION=$ARGO_VERSION
ARG TARGETARCH
RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OPTIONS=--max_old_space_size=8192 yarn build
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.18 AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY go.* ./
RUN go mod download
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
WORKDIR /go/src/github.com/argoproj/argo-cd
COPY . .
COPY --from=argocd-ui /src/dist/app /go/src/github.com/argoproj/argo-cd/ui/dist/app
ARG TARGETOS
ARG TARGETARCH
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make argocd-all
ARG MAKE_TARGET="cli server controller repo-server argocd-util"
RUN make ${MAKE_TARGET}
####################################################################################################
# Final image
####################################################################################################
FROM argocd-base
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
FROM debian:9.5-slim
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-cmp-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
RUN groupadd -g 999 argocd && \
useradd -r -u 999 -g argocd argocd && \
mkdir -p /home/argocd && \
chown argocd:argocd /home/argocd && \
apt-get update && \
apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
USER 999
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
COPY --from=builder /usr/local/bin/aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=argocd
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/* /usr/local/bin/
# Symlink argocd binaries under root for backwards compatibility that expect it under /
RUN ln -s /usr/local/bin/argocd /argocd && \
ln -s /usr/local/bin/argocd-server /argocd-server && \
ln -s /usr/local/bin/argocd-util /argocd-util && \
ln -s /usr/local/bin/argocd-application-controller /argocd-application-controller && \
ln -s /usr/local/bin/argocd-repo-server /argocd-repo-server
USER argocd
RUN helm init --client-only
WORKDIR /home/argocd
ARG BINARY
CMD ${BINARY}
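In CI this multi-stage Dockerfile is driven through `docker buildx`; a hedged local equivalent for a single platform (the tag and platform are illustrative):

```shell
# Build a single-platform image locally from the multi-stage Dockerfile.
docker buildx build --platform linux/amd64 -t argocd:local .
```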


@@ -1,15 +0,0 @@
####################################################################################################
# argocd-dev
####################################################################################################
FROM argocd-base
COPY argocd /usr/local/bin/
USER root
RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-repo-server && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-application-controller && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller
USER 999

Gopkg.lock (generated file; diff suppressed because it is too large)

Gopkg.toml

@@ -0,0 +1,76 @@
# Packages should only be added to the following list when we use them *outside* of our go code.
# (e.g. we want to build the binary to invoke as part of the build process, such as in
# generate-proto.sh). Normal use of golang packages should be added via `dep ensure`, and pinned
# with a [[constraint]] or [[override]] when version is important.
required = [
"github.com/golang/protobuf/protoc-gen-go",
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"k8s.io/code-generator/cmd/go-to-protobuf",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway",
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger",
]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.15.0"
[[constraint]]
name = "github.com/gogo/protobuf"
version = "1.1.1"
# override github.com/grpc-ecosystem/go-grpc-middleware's constraint on master
[[override]]
name = "github.com/golang/protobuf"
version = "1.2.0"
[[constraint]]
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# prometheus does not believe in semversioning yet
[[constraint]]
name = "github.com/prometheus/client_golang"
revision = "7858729281ec582767b20e0d696b6041d995d5e0"
[[constraint]]
branch = "release-1.10"
name = "k8s.io/api"
# override ksonnet dependency
[[override]]
branch = "release-1.10"
name = "k8s.io/apimachinery"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.10"
[[constraint]]
branch = "release-1.10"
name = "k8s.io/code-generator"
[[constraint]]
branch = "release-7.0"
name = "k8s.io/client-go"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.11.0"
[[constraint]]
name = "github.com/gobuffalo/packr"
version = "v1.11.0"
# override ksonnet's logrus dependency
[[override]]
name = "github.com/sirupsen/logrus"
revision = "ea8897e79973357ba785ac2533559a6297e83c44"
[[constraint]]
branch = "master"
name = "github.com/argoproj/pkg"

556
Makefile
View File

@@ -1,152 +1,33 @@
PACKAGE=github.com/argoproj/argo-cd/v2/common
PACKAGE=github.com/argoproj/argo-cd
CURRENT_DIR=$(shell pwd)
DIST_DIR=${CURRENT_DIR}/dist
CLI_NAME=argocd
BIN_NAME=argocd
GEN_RESOURCES_CLI_NAME=argocd-resources-gen
HOST_OS:=$(shell go env GOOS)
HOST_ARCH:=$(shell go env GOARCH)
VERSION=$(shell cat ${CURRENT_DIR}/VERSION)
BUILD_DATE=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
GIT_COMMIT=$(shell git rev-parse HEAD)
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
VOLUME_MOUNT=$(shell if test "$(go env GOOS)" = "darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":delegated"; else echo ""; fi)
KUBECTL_VERSION=$(shell go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)
GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi)
GOCACHE?=$(HOME)/.cache/go-build
DOCKER_SRCDIR?=$(GOPATH)/src
DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
ARGOCD_PROCFILE?=Procfile
# Strict mode has been disabled in latest versions of mkdocs-material.
# Thus pointing to the older image of mkdocs-material matching the version used by argo-cd.
MKDOCS_DOCKER_IMAGE?=squidfunk/mkdocs-material:4.1.1
MKDOCS_RUN_ARGS?=
# Configuration for building argocd-test-tools image
TEST_TOOLS_NAMESPACE?=
TEST_TOOLS_IMAGE=argocd-test-tools
TEST_TOOLS_TAG?=latest
ifdef TEST_TOOLS_NAMESPACE
TEST_TOOLS_PREFIX=${TEST_TOOLS_NAMESPACE}/
endif
# You can change the ports where ArgoCD components will be listening on by
# setting the appropriate environment variables before running make.
ARGOCD_E2E_APISERVER_PORT?=8080
ARGOCD_E2E_REPOSERVER_PORT?=8081
ARGOCD_E2E_REDIS_PORT?=6379
ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=
ARGOCD_E2E_TEST_TIMEOUT?=30m
ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
ARGOCD_BIN_MODE?=true
ARGOCD_LINT_GOGC?=20
# Depending on where we are (legacy or non-legacy pwd), we need to use
# different Docker volume mounts for our source tree
LEGACY_PATH=$(GOPATH)/src/github.com/argoproj/argo-cd
ifeq ("$(PWD)","$(LEGACY_PATH)")
DOCKER_SRC_MOUNT="$(DOCKER_SRCDIR):/go/src$(VOLUME_MOUNT)"
else
DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)"
endif
# Runs any command in the argocd-test-utils container in server mode
# Server mode container will start with uid 0 and drop privileges during runtime
define run-in-test-server
docker run --rm -it \
--name argocd-test-server \
-u $(shell id -u):$(shell id -g) \
-e USER_ID=$(shell id -u) \
-e HOME=/home/user \
-e GOPATH=/go \
-e GOCACHE=/tmp/go-build-cache \
-e ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
-e ARGOCD_E2E_TEST=$(ARGOCD_E2E_TEST) \
-e ARGOCD_E2E_YARN_HOST=$(ARGOCD_E2E_YARN_HOST) \
-e ARGOCD_E2E_DISABLE_AUTH=$(ARGOCD_E2E_DISABLE_AUTH) \
-e ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} \
-e ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} \
-e ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} \
-v ${DOCKER_SRC_MOUNT} \
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
-v /tmp:/tmp${VOLUME_MOUNT} \
-w ${DOCKER_WORKDIR} \
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
-p 4000:4000 \
-p 5000:5000 \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
endef
# Runs any command in the argocd-test-utils container in client mode
define run-in-test-client
docker run --rm -it \
--name argocd-test-client \
-u $(shell id -u):$(shell id -g) \
-e HOME=/home/user \
-e GOPATH=/go \
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
-e GOCACHE=/tmp/go-build-cache \
-e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
-v ${DOCKER_SRC_MOUNT} \
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
-v /tmp:/tmp${VOLUME_MOUNT} \
-w ${DOCKER_WORKDIR} \
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
bash -c "$(1)"
endef
#
define exec-in-test-server
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
endef
PATH:=$(PATH):$(PWD)/hack
# docker image publishing options
DOCKER_PUSH?=false
IMAGE_NAMESPACE?=
# perform static compilation
STATIC_BUILD?=true
# build development images
DEV_IMAGE?=false
ARGOCD_GPG_ENABLED?=true
ARGOCD_E2E_APISERVER_PORT?=8080
PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run vendor/github.com/gobuffalo/packr/packr/main.go"; fi)
override LDFLAGS += \
-X ${PACKAGE}.version=${VERSION} \
-X ${PACKAGE}.buildDate=${BUILD_DATE} \
-X ${PACKAGE}.gitCommit=${GIT_COMMIT} \
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}\
-X ${PACKAGE}.kubectlVersion=${KUBECTL_VERSION}
ifeq (${STATIC_BUILD}, true)
override LDFLAGS += -extldflags "-static"
endif
-X ${PACKAGE}.gitTreeState=${GIT_TREE_STATE}
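As a rough illustration of the LDFLAGS mechanism above (this is a hedged sketch, not the project's actual common package), the -X flags simply overwrite package-level string variables at link time:

package common

// Each -X ${PACKAGE}.<name>=<value> flag rewrites one of these plain string
// variables when the binary is linked, so no source change is needed per release.
var (
	version      = "unknown"
	buildDate    = "unknown"
	gitCommit    = ""
	gitTreeState = ""
)

// Version assembles the linker-injected build metadata into a single string.
func Version() string {
	return version + "+" + gitCommit + "." + gitTreeState + " (built " + buildDate + ")"
}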
# docker image publishing options
DOCKER_PUSH=false
IMAGE_TAG=latest
ifneq (${GIT_TAG},)
IMAGE_TAG=${GIT_TAG}
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
else
IMAGE_TAG?=latest
endif
ifneq (${IMAGE_NAMESPACE},)
override LDFLAGS += -X ${PACKAGE}/install.imageNamespace=${IMAGE_NAMESPACE}
endif
ifneq (${IMAGE_TAG},)
override LDFLAGS += -X ${PACKAGE}/install.imageTag=${IMAGE_TAG}
endif
ifeq (${DOCKER_PUSH},true)
@@ -160,292 +41,101 @@ IMAGE_PREFIX=${IMAGE_NAMESPACE}/
endif
.PHONY: all
all: cli image
# We have some legacy requirements for being checked out within $GOPATH.
# The ensure-gopath target can be used as a dependency to ensure we are running
# within these boundaries.
.PHONY: ensure-gopath
ensure-gopath:
ifneq ("$(PWD)","$(LEGACY_PATH)")
@echo "Due to legacy requirements for codegen, repository needs to be checked out within \$$GOPATH"
@echo "Location of this repo should be '$(LEGACY_PATH)' but is '$(PWD)'"
@exit 1
endif
.PHONY: gogen
gogen: ensure-gopath
export GO111MODULE=off
go generate ./util/argo/...
all: cli server-image controller-image repo-server-image argocd-util
.PHONY: protogen
protogen: ensure-gopath mod-vendor-local
export GO111MODULE=off
protogen:
./hack/generate-proto.sh
.PHONY: openapigen
openapigen: ensure-gopath
export GO111MODULE=off
./hack/update-openapi.sh
.PHONY: notification-catalog
notification-catalog:
go run ./hack/gen-catalog catalog
.PHONY: notification-docs
notification-docs:
go run ./hack/gen-docs
go run ./hack/gen-catalog docs
.PHONY: clientgen
clientgen: ensure-gopath
export GO111MODULE=off
clientgen:
./hack/update-codegen.sh
.PHONY: clidocsgen
clidocsgen: ensure-gopath
go run tools/cmd-docs/main.go
.PHONY: codegen-local
codegen-local: ensure-gopath mod-vendor-local notification-docs notification-catalog gogen protogen clientgen openapigen clidocsgen manifests-local
rm -rf vendor/
.PHONY: codegen
codegen: test-tools-image
$(call run-in-test-client,make codegen-local)
codegen: protogen clientgen
# NOTE: we use packr to do the build instead of go, since we embed .yaml files into the go binary.
# This enables ease of maintenance of the yaml files.
.PHONY: cli
cli: test-tools-image
$(call run-in-test-client, GOOS=${HOST_OS} GOARCH=${HOST_ARCH} make cli-local)
cli: clean-debug
${PACKR_CMD} build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
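For context on the packr note above, here is a minimal Go sketch of the embedding pattern packr enables (the directory and file names are hypothetical, not taken from this repository). Running packr build rewrites such boxes so the referenced files ship inside the compiled binary:

package main

import (
	"fmt"

	"github.com/gobuffalo/packr"
)

func main() {
	// At build time packr replaces this box with an in-binary copy of ./manifests,
	// so the resulting binary can still "read" its YAML files without the
	// directory being present on disk.
	box := packr.NewBox("./manifests")

	// String returns the embedded file's contents (empty string if it is absent).
	install := box.String("install.yaml")
	fmt.Printf("embedded %d bytes\n", len(install))
}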
.PHONY: cli-local
cli-local: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
.PHONY: cli-linux
cli-linux: clean-debug
docker build --iidfile /tmp/argocd-linux-id --target argocd-build --build-arg MAKE_TARGET="cli IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-linux-amd64" .
docker create --name tmp-argocd-linux `cat /tmp/argocd-linux-id`
docker cp tmp-argocd-linux:/go/src/github.com/argoproj/argo-cd/dist/argocd-linux-amd64 dist/
docker rm tmp-argocd-linux
.PHONY: gen-resources-cli-local
gen-resources-cli-local: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${GEN_RESOURCES_CLI_NAME} ./hack/gen-resources/cmd
.PHONY: cli-darwin
cli-darwin: clean-debug
docker build --iidfile /tmp/argocd-darwin-id --target argocd-build --build-arg MAKE_TARGET="cli GOOS=darwin IMAGE_TAG=$(IMAGE_TAG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) CLI_NAME=argocd-darwin-amd64" .
docker create --name tmp-argocd-darwin `cat /tmp/argocd-darwin-id`
docker cp tmp-argocd-darwin:/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64 dist/
docker rm tmp-argocd-darwin
.PHONY: release-cli
release-cli: clean-debug build-ui
make BIN_NAME=argocd-darwin-amd64 GOOS=darwin argocd-all
make BIN_NAME=argocd-darwin-arm64 GOOS=darwin GOARCH=arm64 argocd-all
make BIN_NAME=argocd-linux-amd64 GOOS=linux argocd-all
make BIN_NAME=argocd-linux-arm64 GOOS=linux GOARCH=arm64 argocd-all
make BIN_NAME=argocd-linux-ppc64le GOOS=linux GOARCH=ppc64le argocd-all
make BIN_NAME=argocd-linux-s390x GOOS=linux GOARCH=s390x argocd-all
make BIN_NAME=argocd-windows-amd64.exe GOOS=windows argocd-all
.PHONY: test-tools-image
test-tools-image:
docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
.PHONY: manifests-local
manifests-local:
./hack/update-manifests.sh
.PHONY: argocd-util
argocd-util: clean-debug
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS} -extldflags "-static"' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
.PHONY: manifests
manifests: test-tools-image
$(call run-in-test-client,make manifests-local IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_TAG='${IMAGE_TAG}')
# consolidated binary for cli, util, server, repo-server, controller
.PHONY: argocd-all
argocd-all: clean-debug
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
manifests:
./hack/update-manifests.sh
.PHONY: server
server: clean-debug
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
.PHONY: server-image
server-image:
docker build --build-arg BINARY=argocd-server -t $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-server:$(IMAGE_TAG) ; fi
.PHONY: repo-server
repo-server:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
.PHONY: repo-server-image
repo-server-image:
docker build --build-arg BINARY=argocd-repo-server -t $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-repo-server:$(IMAGE_TAG) ; fi
.PHONY: controller
controller:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd
CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-application-controller ./cmd/argocd-application-controller
.PHONY: build-ui
build-ui:
DOCKER_BUILDKIT=1 docker build -t argocd-ui --target argocd-ui .
find ./ui/dist -type f -not -name gitkeep -delete
docker run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
.PHONY: controller-image
controller-image:
docker build --build-arg BINARY=argocd-application-controller -t $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-application-controller:$(IMAGE_TAG) ; fi
.PHONY: image
ifeq ($(DEV_IMAGE), true)
# The "dev" image builds the binaries from the user's desktop environment (instead of in Docker)
# which speeds up builds. Dockerfile.dev needs to be copied into dist to perform the build, since
# the dist directory is under .dockerignore.
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
image: build-ui
DOCKER_BUILDKIT=1 docker build -t argocd-base --target argocd-base .
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
cp Dockerfile.dev dist
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
else
image:
DOCKER_BUILDKIT=1 docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) .
endif
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
.PHONY: armimage
armimage:
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
.PHONY: cli-image
cli-image:
docker build --build-arg BINARY=argocd -t $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argocd-cli:$(IMAGE_TAG) ; fi
.PHONY: builder-image
builder-image:
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) ; fi
.PHONY: mod-download
mod-download: test-tools-image
$(call run-in-test-client,go mod download)
.PHONY: mod-download-local
mod-download-local:
go mod download && go mod tidy # go mod download changes go.sum https://github.com/golang/go/issues/42970
.PHONY: mod-vendor
mod-vendor: test-tools-image
$(call run-in-test-client,go mod vendor)
.PHONY: mod-vendor-local
mod-vendor-local: mod-download-local
go mod vendor
# Deprecated - replaced by install-local-tools
.PHONY: install-lint-tools
install-lint-tools:
./hack/install.sh lint-tools
# Run linter on the code
.PHONY: lint
lint: test-tools-image
$(call run-in-test-client,make lint-local)
lint:
gometalinter.v2 --config gometalinter.json ./...
# Run linter on the code (local version)
.PHONY: lint-local
lint-local:
golangci-lint --version
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose --timeout 300s
.PHONY: lint-ui
lint-ui: test-tools-image
$(call run-in-test-client,make lint-ui-local)
.PHONY: lint-ui-local
lint-ui-local:
cd ui && yarn lint
# Build all Go code
.PHONY: build
build: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client, make build-local)
# Build all Go code (local version)
.PHONY: build-local
build-local:
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
# Run all unit tests
#
# If TEST_MODULE is set (to a fully qualified module name), only this specific
# module will be tested.
.PHONY: test
test: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client,make TEST_MODULE=$(TEST_MODULE) test-local)
test:
go test -v `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
# Run all unit tests (local version)
.PHONY: test-local
test-local:
if test "$(TEST_MODULE)" = ""; then \
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
.PHONY: test-coverage
test-coverage:
go test -v -covermode=count -coverprofile=coverage.out `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
@if [ "$(COVERALLS_TOKEN)" != "" ] ; then goveralls -ignore `find . -name '*.pb*.go' | grep -v vendor/ | sed 's!^./!!' | paste -d, -s -` -coverprofile=coverage.out -service=argo-ci -repotoken "$(COVERALLS_TOKEN)"; else echo 'No COVERALLS_TOKEN env var specified. Skipping submission to Coveralls.io'; fi
.PHONY: test-race
test-race: test-tools-image
mkdir -p $(GOCACHE)
$(call run-in-test-client,make TEST_MODULE=$(TEST_MODULE) test-race-local)
# Run all unit tests, with data race detection, skipping known failures (local version)
.PHONY: test-race-local
test-race-local:
if test "$(TEST_MODULE)" = ""; then \
./hack/test.sh -race -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
./hack/test.sh -race -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
# Run the E2E test suite. E2E test servers (see start-e2e target) must be
# started before.
.PHONY: test-e2e
test-e2e:
$(call exec-in-test-server,make test-e2e-local)
go test -v -failfast -timeout 20m ./test/e2e
# Run the E2E test suite (local version)
.PHONY: test-e2e-local
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v ./test/e2e
# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image
$(call run-in-test-server,/bin/bash)
# Spawns a shell in the test client container for debugging purposes
debug-test-client: test-tools-image
$(call run-in-test-client,/bin/bash)
# Starts e2e server in a container
.PHONY: start-e2e
start-e2e: test-tools-image
docker version
mkdir -p ${GOCACHE}
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-e2e-local)
# Starts e2e server locally (or within a container)
.PHONY: start-e2e-local
start-e2e-local: mod-vendor-local dep-ui-local cli-local
kubectl create ns argocd-e2e || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
# Create GPG keys and source directories
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
# set paths for locally managed ssh known hosts and tls certs data
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_E2E_DISABLE_AUTH=false \
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
# Cleans VSCode debug.test files from sub-dirs to prevent them from being picked up by golang embed
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
.PHONY: clean-debug
clean-debug:
-find ${CURRENT_DIR} -name debug.test | xargs rm -f
@@ -454,40 +144,8 @@ clean-debug:
clean: clean-debug
-rm -rf ${CURRENT_DIR}/dist
.PHONY: start
start: test-tools-image
docker version
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-local ARGOCD_START=${ARGOCD_START})
# Starts a local instance of ArgoCD
.PHONY: start-local
start-local: mod-vendor-local dep-ui-local
# check we can connect to Docker to start Redis
killall goreman || true
kubectl create ns argocd || true
rm -rf /tmp/argocd-local
mkdir -p /tmp/argocd-local
mkdir -p /tmp/argocd-local/gpg/keys && chmod 0700 /tmp/argocd-local/gpg/keys
mkdir -p /tmp/argocd-local/gpg/source
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=false \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_E2E_TEST=false \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
# Run goreman start with the exclude option; provide the exclude env variable with a list of services to skip
.PHONY: run
run:
bash ./hack/goreman-start.sh
# Runs pre-commit validation with the virtualized toolchain
.PHONY: pre-commit
pre-commit: codegen build lint test
# Runs pre-commit validation with the local toolchain
.PHONY: pre-commit-local
pre-commit-local: codegen-local build-local lint-local test-local
.PHONY: precheckin
precheckin: test lint
.PHONY: release-precheck
release-precheck: manifests
@@ -496,76 +154,4 @@ release-precheck: manifests
@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
.PHONY: release
release: pre-commit release-precheck image release-cli
.PHONY: build-docs-local
build-docs-local:
mkdocs build
.PHONY: build-docs
build-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} build
.PHONY: serve-docs-local
serve-docs-local:
mkdocs serve
.PHONY: serve-docs
serve-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
# Verify that kubectl can connect to your K8s cluster from Docker
.PHONY: verify-kube-connect
verify-kube-connect: test-tools-image
$(call run-in-test-client,kubectl version)
# Show the Go version of local and virtualized environments
.PHONY: show-go-version
show-go-version: test-tools-image
@echo -n "Local Go version: "
@go version
@echo -n "Docker Go version: "
$(call run-in-test-client,go version)
# Installs all tools required to build and test ArgoCD locally
.PHONY: install-tools-local
install-tools-local: install-test-tools-local install-codegen-tools-local install-go-tools-local
# Installs all tools required for running unit & end-to-end tests (Linux packages)
.PHONY: install-test-tools-local
install-test-tools-local:
./hack/install.sh kustomize
./hack/install.sh helm-linux
# Installs all tools required for running codegen (Linux packages)
.PHONY: install-codegen-tools-local
install-codegen-tools-local:
./hack/install.sh codegen-tools
# Installs all tools required for running codegen (Go packages)
.PHONY: install-go-tools-local
install-go-tools-local:
./hack/install.sh codegen-go-tools
.PHONY: dep-ui
dep-ui: test-tools-image
$(call run-in-test-client,make dep-ui-local)
dep-ui-local:
cd ui && yarn install
start-test-k8s:
go run ./hack/k8s
.PHONY: list
list:
@LC_ALL=C $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$'
.PHONY: applicationset-controller
applicationset-controller:
CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-applicationset-controller ./cmd
.PHONY: checksums
checksums:
sha256sum ./dist/$(BIN_NAME)-* | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BIN_NAME)-$(TARGET_VERSION)-checksums.txt
release: release-precheck precheckin cli-darwin cli-linux server-image controller-image repo-server-image cli-image

23
OWNERS
View File

@@ -3,27 +3,6 @@ owners:
- jessesuen
approvers:
- alexec
- alexmt
- jannfis
- jessesuen
- jgwest
- keithchong
- mayzhang2000
- rbreeze
- leoluz
- crenshaw-dev
- pasha-codefresh
reviewers:
- dthomson25
- tetchel
- terrytangyuan
- wtam2018
- ishitasequeira
- reginapizza
- hblixt
- chetan-rns
- wanghong230
- ciiay
- saumeya
- merenbach

View File

@@ -1,12 +1,4 @@
controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
api-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:v2.30.2 dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" == 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:7.0.0-alpine --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" == 'true' ] && exit 0 || [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
dev-mounter: [[ "$ARGOCD_E2E_TEST" != "true" ]] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_ASK_PASS_SOCK=/tmp/applicationset-ask-pass.sock ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
notification: [ "$BIN_MODE" == 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug"
controller: go run ./cmd/argocd-application-controller/main.go
api-server: go run ./cmd/argocd-server/main.go --insecure --disable-auth
repo-server: go run ./cmd/argocd-repo-server/main.go --loglevel debug
dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -p 5557:5557 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/coreos/dex:v2.10.0 serve /dex.yaml"

107
README.md
View File

@@ -1,4 +1,4 @@
[![Integration tests](https://github.com/argoproj/argo-cd/workflows/Integration%20tests/badge.svg?branch=master)](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd) [![Release Version](https://img.shields.io/github/v/release/argoproj/argo-cd?label=argo-cd)](https://github.com/argoproj/argo-cd/releases/latest) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4486/badge)](https://bestpractices.coreinfrastructure.org/projects/4486) [![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)
[![Coverage Status](https://coveralls.io/repos/github/argoproj/argo-cd/badge.svg?branch=master)](https://coveralls.io/github/argoproj/argo-cd?branch=master)
# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -6,69 +6,72 @@
Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
![Argo CD UI](docs/assets/argocd-ui.gif)
[![Argo CD Demo](https://img.youtube.com/vi/0WAm0y2vLIo/0.jpg)](https://youtu.be/0WAm0y2vLIo)
![Argo CD UI](docs/argocd-ui.gif)
## Why Argo CD?
1. Application definitions, configurations, and environments should be declarative and version controlled.
1. Application deployment and lifecycle management should be automated, auditable, and easy to understand.
Application definitions, configurations, and environments should be declarative and version controlled.
Application deployment and lifecycle management should be automated, auditable, and easy to understand.
## Who uses Argo CD?
## Getting Started
[Official Argo CD user list](USERS.md)
Follow our [getting started guide](docs/getting_started.md). Further [documentation](docs/)
is provided for additional features.
## Documentation
## How it works
To learn more about Argo CD [go to the complete documentation](https://argo-cd.readthedocs.io/).
Check live demo at https://cd.apps.argoproj.io/.
Argo CD follows the **GitOps** pattern of using git repositories as the source of truth for defining
the desired application state. Kubernetes manifests can be specified in several ways:
* [ksonnet](https://ksonnet.io) applications
* [kustomize](https://kustomize.io) applications
* [helm](https://helm.sh) charts
* Plain directory of YAML/json manifests
## Community
Argo CD automates the deployment of the desired application states in the specified target environments.
Application deployments can track updates to branches or tags, or be pinned to a specific version of
manifests at a git commit. See [tracking strategies](docs/tracking_strategies.md) for additional
details about the different tracking strategies available.
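To make the declarative model above concrete, here is a purely illustrative Go sketch (hypothetical types, not the Argo CD API) of what an application definition carries: the manifest source, the tracking strategy, and the target environment.

package main

import "fmt"

// AppSource is a hypothetical stand-in for an application's manifest source.
type AppSource struct {
	RepoURL        string // git repository holding the manifests
	Path           string // directory of ksonnet/kustomize/helm/plain YAML manifests
	TargetRevision string // tracking strategy: a branch, a tag, or a pinned commit SHA
}

// AppDestination is a hypothetical stand-in for the target environment.
type AppDestination struct {
	Server    string // target cluster API server
	Namespace string
}

// App ties a named application to its source and destination.
type App struct {
	Name        string
	Source      AppSource
	Destination AppDestination
}

func main() {
	guestbook := App{
		Name: "guestbook",
		Source: AppSource{
			RepoURL:        "https://github.com/argoproj/argocd-example-apps.git",
			Path:           "guestbook",
			TargetRevision: "HEAD", // could also be a branch/tag name or a commit SHA
		},
		Destination: AppDestination{
			Server:    "https://kubernetes.default.svc",
			Namespace: "default",
		},
	}
	fmt.Printf("%+v\n", guestbook)
}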
### Contribution, Discussion and Support
You can reach the Argo CD community and developers via the following channels:
* Q & A : [Github Discussions](https://github.com/argoproj/argo-cd/discussions)
* Chat : [The #argo-cd Slack channel](https://argoproj.github.io/community/join-slack)
* Contributors Office Hours: [Every Thursday](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
* User Community meeting: [First Wednesday of the month](https://calendar.google.com/calendar/u/0/embed?src=argoproj@gmail.com) | [Agenda](https://docs.google.com/document/d/1ttgw98MO45Dq7ZUHpIiOIEfbyeitKHNfMjbY5dLLMKQ)
For a quick 10 minute overview of ArgoCD, check out the demo presented to the Sig Apps community
meeting:
[![Alt text](https://img.youtube.com/vi/aWDIQMbp1cc/0.jpg)](https://youtu.be/aWDIQMbp1cc?t=1m4s)
Participation in the Argo CD project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
## Architecture
### Blogs and Presentations
![Argo CD Architecture](docs/argocd_architecture.png)
1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://blog.akuity.io/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argo-cd-7c5b4057ee49)
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)
1. [Couchbase - How To Run a Database Cluster in Kubernetes Using Argo CD](https://youtu.be/nkPoPaVzExY)
1. [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)
1. [Environments Based On Pull Requests (PRs): Using Argo CD To Apply GitOps Principles On Previews](https://youtu.be/cpAaI8p4R60)
1. [Argo CD: Applying GitOps Principles To Manage Production Environment In Kubernetes](https://youtu.be/vpWQeoaiRM4)
1. [Creating Temporary Preview Environments Based On Pull Requests With Argo CD And Codefresh](https://codefresh.io/continuous-deployment/creating-temporary-preview-environments-based-pull-requests-argo-cd-codefresh/)
1. [Tutorial: Everything You Need To Become A GitOps Ninja](https://www.youtube.com/watch?v=r50tRQjisxw) 90m tutorial on GitOps and Argo CD.
1. [Comparison of Argo CD, Spinnaker, Jenkins X, and Tekton](https://www.inovex.de/blog/spinnaker-vs-argo-cd-vs-tekton-vs-jenkins-x/)
1. [Simplify and Automate Deployments Using GitOps with IBM Multicloud Manager 3.1.2](https://www.ibm.com/cloud/blog/simplify-and-automate-deployments-using-gitops-with-ibm-multicloud-manager-3-1-2)
1. [GitOps for Kubeflow using Argo CD](https://v0-6.kubeflow.org/docs/use-cases/gitops-for-kubeflow/)
1. [GitOps Toolsets on Kubernetes with CircleCI and Argo CD](https://www.digitalocean.com/community/tutorials/webinar-series-gitops-tool-sets-on-kubernetes-with-circleci-and-argo-cd)
1. [CI/CD in Light Speed with K8s and Argo CD](https://www.youtube.com/watch?v=OdzH82VpMwI&feature=youtu.be)
1. [Machine Learning as Code](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU). Among other things, describes how Kubeflow uses Argo CD to implement GitOPs for ML
1. [Argo CD - GitOps Continuous Delivery for Kubernetes](https://www.youtube.com/watch?v=aWDIQMbp1cc&feature=youtu.be&t=1m4s)
1. [Introduction to Argo CD : Kubernetes DevOps CI/CD](https://www.youtube.com/watch?v=2WSJF7d8dUg&feature=youtu.be)
1. [GitOps Deployment and Kubernetes - using Argo CD](https://medium.com/riskified-technology/gitops-deployment-and-kubernetes-f1ab289efa4b)
1. [Deploy Argo CD with Ingress and TLS in Three Steps: No YAML Yak Shaving Required](https://itnext.io/deploy-argo-cd-with-ingress-and-tls-in-three-steps-no-yaml-yak-shaving-required-bc536d401491)
1. [GitOps Continuous Delivery with Argo and Codefresh](https://codefresh.io/events/cncf-member-webinar-gitops-continuous-delivery-argo-codefresh/)
1. [Stay up to date with Argo CD and Renovate](https://mjpitz.com/blog/2020/12/03/renovate-your-gitops/)
1. [Setting up Argo CD with Helm](https://www.arthurkoziel.com/setting-up-argocd-with-helm/)
1. [Applied GitOps with Argo CD](https://thenewstack.io/applied-gitops-with-argocd/)
1. [Solving configuration drift using GitOps with Argo CD](https://www.cncf.io/blog/2020/12/17/solving-configuration-drift-using-gitops-with-argo-cd/)
1. [Decentralized GitOps over environments](https://blogs.sap.com/2021/05/06/decentralized-gitops-over-environments/)
1. [How GitOps and Operators mark the rise of Infrastructure-As-Software](https://paytmlabs.com/blog/2021/10/how-to-improve-operational-work-with-operators-and-gitops/)
1. [Getting Started with ArgoCD for GitOps Deployments](https://youtu.be/AvLuplh1skA)
1. [Using Argo CD & Datree for Stable Kubernetes CI/CD Deployments](https://youtu.be/17894DTru2Y)
Argo CD is implemented as a kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the git repo).
A deployed application whose live state deviates from the target state is considered `OutOfSync`.
Argo CD reports & visualizes the differences, while providing facilities to automatically or
manually sync the live state back to the desired target state. Any modifications made to the desired
target state in the git repo can be automatically applied and reflected in the specified target
environments.
For additional details, see [architecture overview](docs/architecture.md).
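As a hand-wavy, self-contained Go sketch of the control loop just described (the two fetch functions are stand-ins for git manifest rendering and cluster queries, not real Argo CD calls):

package main

import "fmt"

// desiredStateFromGit stands in for cloning the repo and rendering
// ksonnet/kustomize/helm/plain-YAML manifests into concrete objects.
func desiredStateFromGit() map[string]string {
	return map[string]string{"Deployment/guestbook": "replicas: 3"}
}

// liveStateFromCluster stands in for listing the managed objects via the
// Kubernetes API.
func liveStateFromCluster() map[string]string {
	return map[string]string{"Deployment/guestbook": "replicas: 2"}
}

// syncStatus compares desired vs. live state; any deviation marks the
// application OutOfSync, which can then be synced automatically or on demand.
func syncStatus() string {
	desired, live := desiredStateFromGit(), liveStateFromCluster()
	for name, want := range desired {
		if live[name] != want {
			return "OutOfSync"
		}
	}
	return "Synced"
}

func main() { fmt.Println(syncStatus()) }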
## Features
* Automated deployment of applications to specified target environments
* Flexibility in support for multiple config management tools (Ksonnet, Kustomize, Helm, plain-YAML)
* Continuous monitoring of deployed applications
* Automated or manual syncing of applications to its desired state
* Web and CLI based visualization of applications and differences between live vs. desired state
* Rollback/Roll-anywhere to any application state committed in the git repository
* Health assessment statuses on all components of the application
* SSO Integration (OIDC, OAuth2, LDAP, SAML 2.0, GitLab, Microsoft, LinkedIn)
* Webhook Integration (GitHub, BitBucket, GitLab)
* PreSync, Sync, PostSync hooks to support complex application rollouts (e.g. blue/green & canary upgrades)
* Audit trails for application events and API calls
* Parameter overrides for overriding ksonnet/helm parameters in git
* Service account/access key management for CI pipelines
## Development Status
* Argo CD is being used in production to deploy SaaS services at Intuit
## Roadmap
* Auto-sync toggle to directly apply git state changes to live state
* Revamped UI, and feature parity with CLI
* Customizable application actions

View File

@@ -1,76 +0,0 @@
# Security Policy for Argo CD
Version: **v1.4 (2022-01-23)**
## Preface
As a deployment tool, Argo CD needs to have production access which makes
security a very important topic. The Argoproj team takes security very
seriously and is continuously working on improving it.
## A word about security scanners
Many organisations these days employ security scanners to validate their
container images before letting them on their clusters, and that is a good
thing. However, the quality and results of these scanners vary greatly;
many of them produce false positives and require people to review the
reported issues and validate them for correctness. A good example is that
some scanners report kernel vulnerabilities for container images just
because they are derived from a particular distribution.
We kindly ask you to not raise issues or contact us regarding any issues
that are found by your security scanner. Many of those produce a lot of false
positives, and many of these issues don't affect Argo CD. We do have scanners
in place for our code, dependencies and container images that we publish. We
are well aware of the issues that may affect Argo CD and are constantly
working on the remediation of those that affect Argo CD and our users.
If you believe that we might have missed an issue that we should take a look
at (that can happen), then please discuss it with us. If there is a CVE
assigned to the issue, please do open an issue on our GitHub tracker instead
of writing to the security contact e-mail, since findings reported by scanners
are already public and the discussion that might emerge is of benefit to the
general community. However, please at least roughly validate your scanner
results and their impact on Argo CD before opening an issue.
## Supported Versions
We currently support the most recent release (`N`, e.g. `1.8`) and the release
previous to the most recent one (`N-1`, e.g. `1.7`). With the release of
`N+1`, `N-1` drops out of support and `N` becomes `N-1`.
We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
supported versions, which will contain fixes for security vulnerabilities and
important bugs. Prior releases might receive critical security fixes on a best
effort basis, however, it cannot be guaranteed that security fixes get
back-ported to these unsupported versions.
In rare cases, where a security fix needs complex re-design of a feature or is
otherwise very intrusive, and there's a workaround available, we may decide to
provide a forward-fix only, e.g. to be released the next minor release, instead
of releasing it within a patch branch for the currently supported releases.
## Reporting a Vulnerability
If you find a security-related bug in ArgoCD, we kindly ask you for responsible
disclosure and for giving us appropriate time to react, analyze, and develop a
fix to mitigate the reported vulnerability.
We will do our best to react quickly on your inquiry, and to coordinate a fix
and disclosure with you. Sometimes, it might take a little longer for us to
react (e.g. out of office conditions), so please bear with us in these cases.
We will publish security advisories using the
[GitHub Security Advisories](https://github.com/argoproj/argo-cd/security/advisories)
feature to keep our community well informed, and will credit you for your
findings (unless you prefer to stay anonymous, of course).
Please report vulnerabilities by e-mail to the following address:
* cncf-argo-security@lists.cncf.io
## Securing your Argo CD Instance
See the [operator manual security page](docs/operator-manual/security.md) for
additional information about Argo CD's security features and how to make your
Argo CD production ready.

View File

@@ -1,8 +0,0 @@
# Defined below are the security contacts for this repo.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities
alexmt
edlee2121
jessesuen

208
USERS.md
View File

@@ -1,208 +0,0 @@
## Who uses Argo CD?
As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization name if you are using Argo CD.
Currently, the following organizations are **officially** using Argo CD:
1. [127Labs](https://127labs.com/)
1. [3Rein](https://www.3rein.com/)
1. [7shifts](https://www.7shifts.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Adventure](https://jp.adventurekk.com/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Allianz Direct](https://www.allianzdirect.de/)
1. [Ambassador Labs](https://www.getambassador.io/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
1. [Axual B.V.](https://axual.com)
1. [Baloise](https://www.baloise.com)
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
1. [Beez Innovation Labs](https://www.beezlabs.com/)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [BigPanda](https://bigpanda.io)
1. [BioBox Analytics](https://biobox.io)
1. [BMW Group](https://www.bmwgroup.com/)
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Camptocamp](https://camptocamp.com)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [Chargetrip](https://chargetrip.com)
1. [Chime](https://www.chime.com)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Cobalt](https://www.cobalt.io/)
1. [Codefresh](https://www.codefresh.io/)
1. [Codility](https://www.codility.com/)
1. [Commonbond](https://commonbond.co/)
1. [CROZ d.o.o.](https://croz.net/)
1. [Crédit Agricole CIB](https://www.ca-cib.com)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cybozu](https://cybozu-global.com)
1. [D2iQ](https://www.d2iq.com)
1. [Datarisk](https://www.datarisk.io/)
1. [Deloitte](https://www.deloitte.com/)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
1. [Electronic Arts Inc. ](https://www.ea.com)
1. [Elium](https://www.elium.com)
1. [END.](https://www.endclothing.com/)
1. [Energisme](https://energisme.com/)
1. [Faro](https://www.faro.com/)
1. [Fave](https://myfave.com)
1. [Flip](https://flip.id)
1. [Fonoa](https://www.fonoa.com/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Future PLC](https://www.futureplc.com/)
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
1. [Garner](https://www.garnercorp.com)
1. [Generali Deutschland AG](https://www.generali.de/)
1. [Gitpod](https://www.gitpod.io)
1. [Gllue](https://gllue.com)
1. [Glovo](https://www.glovoapp.com)
1. [GMETRI](https://gmetri.com/)
1. [Gojek](https://www.gojek.io/)
1. [Greenpass](https://www.greenpass.com.br/)
1. [Handelsbanken](https://www.handelsbanken.se)
1. [Healy](https://www.healyworld.net)
1. [Helio](https://helio.exchange)
1. [hipages](https://hipages.com.au/)
1. [Hiya](https://hiya.com)
1. [Honestbank](https://honestbank.com)
1. [IBM](https://www.ibm.com/)
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
1. [imaware](https://imaware.health)
1. [Index Exchange](https://www.indexexchange.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
1. [Joblift](https://joblift.com/)
1. [JovianX](https://www.jovianx.com/)
1. [Kaltura](https://corp.kaltura.com/)
1. [KarrotPay](https://www.daangnpay.com/)
1. [Karrot](https://www.daangn.com/)
1. [Kasa](https://kasa.co.kr/)
1. [Keeeb](https://www.keeeb.com/)
1. [Keptn](https://keptn.sh)
1. [Kinguin](https://www.kinguin.net/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
1. [KubeSphere](https://github.com/kubesphere)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [Lightricks](https://www.lightricks.com/)
1. [LINE](https://linecorp.com/en/)
1. [Lytt](https://www.lytt.co/)
1. [Majid Al Futtaim](https://www.majidalfuttaim.com/)
1. [Major League Baseball](https://mlb.com)
1. [Mambu](https://www.mambu.com/)
1. [MariaDB](https://mariadb.com)
1. [Mattermost](https://www.mattermost.com)
1. [Max Kelsen](https://www.maxkelsen.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
1. [mixi Group](https://mixi.co.jp/)
1. [Moengage](https://www.moengage.com/)
1. [Money Forward](https://corp.moneyforward.com/en/)
1. [MOO Print](https://www.moo.com/)
1. [MTN Group](https://www.mtn.com/)
1. [Natura &Co](https://naturaeco.com/)
1. [New Relic](https://newrelic.com/)
1. [Nextdoor](https://nextdoor.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Nitro](https://gonitro.com)
1. [Octadesk](https://octadesk.com)
1. [omegaUp](https://omegaUp.com)
1. [openEuler](https://openeuler.org)
1. [openGauss](https://opengauss.org/)
1. [openLooKeng](https://openlookeng.io)
1. [OpenSaaS Studio](https://opensaas.studio)
1. [Opensurvey](https://www.opensurvey.co.kr/)
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [p3r](https://www.p3r.one/)
1. [Packlink](https://www.packlink.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Polarpoint.io](https://polarpoint.io)
1. [Preferred Networks](https://preferred.jp/en/)
1. [Prudential](https://prudential.com.sg)
1. [PUBG](https://www.pubg.com)
1. [Qonto](https://qonto.com)
1. [QuintoAndar](https://quintoandar.com.br)
1. [Quipper](https://www.quipper.com/)
1. [Recreation.gov](https://www.recreation.gov/)
1. [Red Hat](https://www.redhat.com/)
1. [RightRev](https://rightrev.com/)
1. [Rise](https://www.risecard.eu/)
1. [Riskified](https://www.riskified.com/)
1. [Robotinfra](https://www.robotinfra.com)
1. [Rubin Observatory](https://www.lsst.org)
1. [Saildrone](https://www.saildrone.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smilee.io](https://smilee.io)
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)
1. [Speee](https://speee.jp/)
1. [Spendesk](https://spendesk.com/)
1. [Spores Labs](https://spores.app)
1. [Stuart](https://stuart.com/)
1. [Sumo Logic](https://sumologic.com/)
1. [Sutpc](http://www.sutpc.com/)
1. [Swiss Post](https://github.com/swisspost)
1. [Swisscom](https://www.swisscom.ch)
1. [Swissquote](https://github.com/swissquote)
1. [Syncier](https://syncier.com/)
1. [TableCheck](https://tablecheck.com/)
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [Technacy](https://www.technacy.it/)
1. [Tesla](https://tesla.com/)
1. [ThousandEyes](https://www.thousandeyes.com/)
1. [Ticketmaster](https://ticketmaster.com)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tigera](https://www.tigera.io/)
1. [Toss](https://toss.im/en)
1. [tru.ID](https://tru.id)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [UBIO](https://ub.io/)
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
1. [ungleich.ch](https://ungleich.ch/)
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Viaduct](https://www.viaduct.ai/)
1. [Virtuo](https://www.govirtuo.com/)
1. [VISITS Technologies](https://visits.world/en)
1. [Volvo Cars](https://www.volvocars.com/)
1. [VSHN - The DevOps Company](https://vshn.ch/)
1. [Walkbase](https://www.walkbase.com/)
1. [Webstores](https://www.webstores.nl)
1. [Wehkamp](https://www.wehkamp.nl/)
1. [WeMo Scooter](https://www.wemoscooter.com/)
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
1. [Witick](https://witick.io/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [WSpot](https://www.wspot.com.br/)
1. [Yieldlab](https://www.yieldlab.de/)
1. [Youverify](https://youverify.co/)
1. [Yubo](https://www.yubo.live/)
1. [Zimpler](https://www.zimpler.com/)
1. [ZOZO](https://corp.zozo.com/)
1. [Trendyol](https://www.trendyol.com/)
1. [RapidAPI](https://www.rapidapi.com/)

View File

@@ -1 +1 @@
2.4.23
0.9.2

View File

@@ -1,732 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/db"
)
const (
// Rather than importing the whole argocd-notifications controller, just copying the const here
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/subscriptions.go#L12
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
ReconcileRequeueOnValidationError = time.Minute * 3
)
var (
preservedAnnotations = []string{
NotifiedAnnotationKey,
argov1alpha1.AnnotationKeyRefresh,
}
)
// ApplicationSetReconciler reconciles a ApplicationSet object
type ApplicationSetReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder record.EventRecorder
Generators map[string]generators.Generator
ArgoDB db.ArgoDB
ArgoAppClientset appclientset.Interface
KubeClientset kubernetes.Interface
utils.Policy
utils.Renderer
}
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch
func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = r.Log.WithValues("applicationset", req.NamespacedName)
_ = log.WithField("applicationset", req.NamespacedName)
var applicationSetInfo argoprojiov1alpha1.ApplicationSet
parametersGenerated := false
if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil {
if client.IgnoreNotFound(err) != nil {
log.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err)
}
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
return ctrl.Result{}, nil
}
// Log a warning if there are unrecognized generators
utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: string(applicationSetReason),
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
parametersGenerated = true
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo, req.Namespace)
if err != nil {
// While some generators may return an error that requires user intervention,
// other generators reference external resources that may change so that
// the error no longer occurs. We thus log the error and requeue
// with a timeout to give this another shot at a later time.
//
// Changes to watched resources will cause this to be reconciled sooner than
// the RequeueAfter time.
log.Errorf("error occurred during application validation: %s", err.Error())
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
}
var validApps []argov1alpha1.Application
for i := range desiredApplications {
if validateErrors[i] == nil {
validApps = append(validApps, desiredApplications[i])
}
}
if len(validateErrors) > 0 {
var message string
for _, v := range validateErrors {
message = v.Error()
log.Errorf("validation error found during application validation: %s", message)
}
if len(validateErrors) > 1 {
// Only the last message gets added to the appset status, to keep the size reasonable.
message = fmt.Sprintf("%s (and %d more)", message, len(validateErrors)-1)
}
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: message,
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationValidationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
}
if r.Policy.Update() {
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonUpdateApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
} else {
err = r.createInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonCreateApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
}
if r.Policy.Delete() {
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonDeleteApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
}
if applicationSetInfo.RefreshRequired() {
delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
err := r.Client.Update(ctx, &applicationSetInfo)
if err != nil {
log.Warnf("error occurred while updating ApplicationSet: %v", err)
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: err.Error(),
Reason: argoprojiov1alpha1.ApplicationSetReasonRefreshApplicationError,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
return ctrl.Result{}, err
}
}
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
log.WithField("requeueAfter", requeueAfter).Info("end reconcile")
if len(validateErrors) == 0 {
if err := r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "All applications have been generated successfully",
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{
RequeueAfter: requeueAfter,
}, nil
}
func getParametersGeneratedCondition(parametersGenerated bool, message string) argoprojiov1alpha1.ApplicationSetCondition {
var paramtersGeneratedCondition argoprojiov1alpha1.ApplicationSetCondition
if parametersGenerated {
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
Message: "Successfully generated parameters for all Applications",
Reason: argoprojiov1alpha1.ApplicationSetReasonParametersGenerated,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}
} else {
paramtersGeneratedCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionParametersGenerated,
Message: message,
Reason: argoprojiov1alpha1.ApplicationSetReasonErrorOccurred,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
}
}
return paramtersGeneratedCondition
}
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argoprojiov1alpha1.ApplicationSetCondition {
var resourceUpToDateCondition argoprojiov1alpha1.ApplicationSetCondition
if errorOccurred {
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: message,
Reason: reason,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
}
} else {
resourceUpToDateCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate,
Message: "ApplicationSet up to date",
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusTrue,
}
}
return resourceUpToDateCondition
}
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argoprojiov1alpha1.ApplicationSet, condition argoprojiov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
// check if error occurred during reconcile process
errOccurred := condition.Type == argoprojiov1alpha1.ApplicationSetConditionErrorOccurred
var errOccurredCondition argoprojiov1alpha1.ApplicationSetCondition
if errOccurred {
errOccurredCondition = condition
} else {
errOccurredCondition = argoprojiov1alpha1.ApplicationSetCondition{
Type: argoprojiov1alpha1.ApplicationSetConditionErrorOccurred,
Message: "Successfully generated parameters for all Applications",
Reason: argoprojiov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
Status: argoprojiov1alpha1.ApplicationSetConditionStatusFalse,
}
}
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
newConditions := []argoprojiov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
needToUpdateConditions := false
for _, condition := range newConditions {
// do nothing if appset already has same condition
for _, c := range applicationSet.Status.Conditions {
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
needToUpdateConditions = true
break
}
}
}
evaluatedTypes := map[argoprojiov1alpha1.ApplicationSetConditionType]bool{
argoprojiov1alpha1.ApplicationSetConditionErrorOccurred: true,
argoprojiov1alpha1.ApplicationSetConditionParametersGenerated: true,
argoprojiov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
}
if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 {
// fetch the updated ApplicationSet object before updating it
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
if client.IgnoreNotFound(err) == nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %v", err)
}
applicationSet.Status.SetConditions(
newConditions, evaluatedTypes,
)
// Update the newly fetched object with new set of conditions
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil && !apierr.IsNotFound(err) {
return fmt.Errorf("unable to set application set condition: %v", err)
}
}
return nil
}
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
// generated applications.
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argoprojiov1alpha1.ApplicationSet, namespace string) (map[int]error, error) {
errorsByIndex := map[int]error{}
namesSet := map[string]bool{}
for i, app := range desiredApplications {
if !namesSet[app.Name] {
namesSet[app.Name] = true
} else {
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
continue
}
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(namespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
continue
}
return nil, err
}
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, namespace); err != nil {
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
continue
}
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
if err != nil {
return nil, err
}
if len(conditions) > 0 {
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
continue
}
}
return errorsByIndex, nil
}
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argoprojiov1alpha1.ApplicationSet) time.Duration {
var res time.Duration
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
relevantGenerators := generators.GetRelevantGenerators(&requestedGenerator, r.Generators)
for _, g := range relevantGenerators {
t := g.GetRequeueAfter(&requestedGenerator)
if res == 0 {
res = t
} else if t != 0 && t < res {
res = t
}
}
}
return res
}
func getTempApplication(applicationSetTemplate argoprojiov1alpha1.ApplicationSetTemplate) *argov1alpha1.Application {
var tmplApplication argov1alpha1.Application
tmplApplication.Annotations = applicationSetTemplate.Annotations
tmplApplication.Labels = applicationSetTemplate.Labels
tmplApplication.Namespace = applicationSetTemplate.Namespace
tmplApplication.Name = applicationSetTemplate.Name
tmplApplication.Spec = applicationSetTemplate.Spec
tmplApplication.Finalizers = applicationSetTemplate.Finalizers
return &tmplApplication
}
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argoprojiov1alpha1.ApplicationSetReasonType, error) {
var res []argov1alpha1.Application
var firstError error
var applicationSetReason argoprojiov1alpha1.ApplicationSetReasonType
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo)
if err != nil {
log.WithError(err).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonApplicationParamsGenerationError
}
continue
}
for _, a := range t {
tmplApplication := getTempApplication(a.Template)
for _, p := range a.Params {
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p)
if err != nil {
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err
applicationSetReason = argoprojiov1alpha1.ApplicationSetReasonRenderTemplateParamsError
}
continue
}
res = append(res, *app)
}
}
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
}
return res, applicationSetReason, firstError
}
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
// grab the Application object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's an ApplicationSet...
if owner.APIVersion != argoprojiov1alpha1.GroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&argoprojiov1alpha1.ApplicationSet{}).
Owns(&argov1alpha1.Application{}).
Watches(
&source.Kind{Type: &corev1.Secret{}},
&clusterSecretEventHandler{
Client: mgr.GetClient(),
Log: log.WithField("type", "createSecretEventHandler"),
}).
// TODO: also watch Applications and respond on changes if we own them.
Complete(r)
}
// createOrUpdateInCluster will create / update application resources in the cluster.
// - For new applications, it will call create
// - For existing applications, it will call update
// The function also adds an owner reference to all applications, which is later used to delete them.
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var firstError error
// Creates or updates the application in appList
for _, generatedApp := range desiredApplications {
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
generatedApp.Namespace = applicationSet.Namespace
found := &argov1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: generatedApp.Name,
Namespace: generatedApp.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
}
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
// Copy only the significant Application/ObjectMeta fields from the generatedApp
found.Spec = generatedApp.Spec
// Preserve specially treated argo cd annotations:
// * https://github.com/argoproj/applicationset/issues/180
// * https://github.com/argoproj/argo-cd/issues/10500
for _, key := range preservedAnnotations {
if state, exists := found.ObjectMeta.Annotations[key]; exists {
if generatedApp.Annotations == nil {
generatedApp.Annotations = map[string]string{}
}
generatedApp.Annotations[key] = state
}
}
found.ObjectMeta.Annotations = generatedApp.Annotations
found.ObjectMeta.Finalizers = generatedApp.Finalizers
found.ObjectMeta.Labels = generatedApp.Labels
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
if err != nil {
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
if firstError == nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
appLog.Logf(log.InfoLevel, "%s Application", action)
}
return firstError
}
// createInCluster filters desiredApplications down to only the applications that do not yet exist in the cluster,
// then calls createOrUpdateInCluster to do the actual create.
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var createApps []argov1alpha1.Application
current, err := r.getCurrentApplications(ctx, applicationSet)
if err != nil {
return err
}
m := make(map[string]bool) // Holds the app names that currently exist in the cluster
for _, app := range current {
m[app.Name] = true
}
// keep only the applications that are not yet in the cluster (i.e. the ones to create)
for _, app := range desiredApplications {
_, exists := m[app.Name]
if !exists {
createApps = append(createApps, app)
}
}
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
}
func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argoprojiov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
// TODO: Should this use the context param?
var current argov1alpha1.ApplicationList
err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name})
if err != nil {
return nil, err
}
return current.Items, nil
}
// deleteInCluster will delete Applications that are currently in the cluster but not in desiredApplications.
// The function must be called only after all generators have run and the desired applications have been generated.
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
// clusterList, err := argoDB.ListClusters(ctx)
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, applicationSet.Namespace)
if err != nil {
return err
}
// Save current applications to be able to delete the ones that are not in appList
current, err := r.getCurrentApplications(ctx, applicationSet)
if err != nil {
return err
}
m := make(map[string]bool) // Holds the desired app names, used to decide which existing apps to delete
for _, app := range desiredApplications {
m[app.Name] = true
}
// Delete apps that are not in the desired set
var firstError error
for _, app := range current {
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
_, exists := m[app.Name]
if !exists {
// Removes the Argo CD resources finalizer if the application contains an invalid target (e.g. missing cluster)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
if err != nil {
appLog.WithError(err).Error("failed to update Application")
if firstError == nil {
firstError = err
}
continue
}
err = r.Client.Delete(ctx, &app)
if err != nil {
appLog.WithError(err).Error("failed to delete Application")
if firstError == nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
appLog.Log(log.InfoLevel, "Deleted application")
}
}
return firstError
}
// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (e.g. missing cluster)
func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx context.Context, applicationSet argoprojiov1alpha1.ApplicationSet, app *argov1alpha1.Application, clusterList *argov1alpha1.ClusterList, appLog *log.Entry) error {
// Only check if the finalizers need to be removed IF there are finalizers to remove
if len(app.Finalizers) == 0 {
return nil
}
var validDestination bool
// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
if err := utils.ValidateDestination(ctx, &app.Spec.Destination, r.KubeClientset, applicationSet.Namespace); err != nil {
appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
validDestination = false
} else {
// Detect if the destination's server field does not match an existing cluster
matchingCluster := false
for _, cluster := range clusterList.Items {
// Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable.
if app.Spec.Destination.Server != cluster.Server {
continue
}
// The name must match, if it is not empty
if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name {
continue
}
matchingCluster = true
break
}
if !matchingCluster {
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
}
validDestination = matchingCluster
}
// If the destination is invalid (for example the cluster is no longer defined), then remove
// the application finalizers to avoid triggering Argo CD bug #5817
if !validDestination {
// Filter out the Argo CD finalizer from the finalizer list
var newFinalizers []string
for _, existingFinalizer := range app.Finalizers {
if existingFinalizer != argov1alpha1.ResourcesFinalizerName { // only remove this one
newFinalizers = append(newFinalizers, existingFinalizer)
}
}
// If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app
if len(newFinalizers) != len(app.Finalizers) {
app.Finalizers = newFinalizers
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name)
appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination")
err := r.Client.Update(ctx, app, &client.UpdateOptions{})
if err != nil {
return err
}
}
}
return nil
}
var _ handler.EventHandler = &clusterSecretEventHandler{}

File diff suppressed because it is too large

View File

@@ -1,84 +0,0 @@
package controllers
import (
"context"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
// clusterSecretEventHandler is used when watching Secrets to check if they are ArgoCD Cluster Secrets, and if so
// requeue any related ApplicationSets.
type clusterSecretEventHandler struct {
//handler.EnqueueRequestForOwner
Log log.FieldLogger
Client client.Client
}
func (h *clusterSecretEventHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.Object)
}
func (h *clusterSecretEventHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.ObjectNew)
}
func (h *clusterSecretEventHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.Object)
}
func (h *clusterSecretEventHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {
h.queueRelatedAppGenerators(q, e.Object)
}
// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allowing us to easily mock
// it for testing purposes.
type addRateLimitingInterface interface {
Add(item interface{})
}
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(q addRateLimitingInterface, object client.Object) {
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
return
}
h.Log.WithFields(log.Fields{
"namespace": object.GetNamespace(),
"name": object.GetName(),
}).Info("processing event for cluster secret")
appSetList := &argoprojiov1alpha1.ApplicationSetList{}
err := h.Client.List(context.Background(), appSetList)
if err != nil {
h.Log.WithError(err).Error("unable to list ApplicationSets")
return
}
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
for _, appSet := range appSetList.Items {
foundClusterGenerator := false
for _, generator := range appSet.Spec.Generators {
if generator.Clusters != nil {
foundClusterGenerator = true
break
}
}
if foundClusterGenerator {
// TODO: only queue the AppGenerator if the labels match this cluster
req := ctrl.Request{NamespacedName: types.NamespacedName{Namespace: appSet.Namespace, Name: appSet.Name}}
q.Add(req)
}
}
}

View File

@@ -1,234 +0,0 @@
package controllers
import (
"testing"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
func TestClusterEventHandler(t *testing.T) {
scheme := runtime.NewScheme()
err := argoprojiov1alpha1.AddToScheme(scheme)
assert.Nil(t, err)
err = argov1alpha1.AddToScheme(scheme)
assert.Nil(t, err)
tests := []struct {
name string
items []argoprojiov1alpha1.ApplicationSet
secret corev1.Secret
expectedRequests []ctrl.Request
}{
{
name: "no application sets should mean no requests",
items: []argoprojiov1alpha1.ApplicationSet{},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{},
},
{
name: "a cluster generator should produce a request",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{{
NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"},
}},
},
{
name: "multiple cluster generators should produce multiple requests",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set2",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set2"}},
},
},
{
name: "non-cluster generator should not match",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "another-namespace",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
{
ObjectMeta: v1.ObjectMeta{
Name: "app-set-non-cluster",
Namespace: "argocd",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
List: &argoprojiov1alpha1.ListGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
},
},
{
name: "non-argo cd secret should not match",
items: []argoprojiov1alpha1.ApplicationSet{
{
ObjectMeta: v1.ObjectMeta{
Name: "my-app-set",
Namespace: "another-namespace",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{
{
Clusters: &argoprojiov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd",
Name: "my-non-argocd-secret",
},
},
expectedRequests: []reconcile.Request{},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
appSetList := argoprojiov1alpha1.ApplicationSetList{
Items: test.items,
}
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
handler := &clusterSecretEventHandler{
Client: fakeClient,
Log: log.WithField("type", "createSecretEventHandler"),
}
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
handler.queueRelatedAppGenerators(&mockAddRateLimitingInterface, &test.secret)
assert.False(t, mockAddRateLimitingInterface.errorOccurred)
assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests)
})
}
}
// Add checks the type, and adds it to the internal list of received additions
func (obj *mockAddRateLimitingInterface) Add(item interface{}) {
if req, ok := item.(ctrl.Request); ok {
obj.addedItems = append(obj.addedItems, req)
} else {
obj.errorOccurred = true
}
}
type mockAddRateLimitingInterface struct {
errorOccurred bool
addedItems []ctrl.Request
}

View File

@@ -1,180 +0,0 @@
package controllers
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
dynfake "k8s.io/client-go/dynamic/fake"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
func TestRequeueAfter(t *testing.T) {
mockServer := argoCDServiceMock{}
ctx := context.Background()
scheme := runtime.NewScheme()
err := argoappsetv1alpha1.AddToScheme(scheme)
assert.Nil(t, err)
gvrToListKind := map[schema.GroupVersionResource]string{{
Group: "mallard.io",
Version: "v1",
Resource: "ducks",
}: "DuckList"}
appClientset := kubefake.NewSimpleClientset()
k8sClient := fake.NewClientBuilder().Build()
duckType := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "v2quack",
"kind": "Duck",
"metadata": map[string]interface{}{
"name": "mightyduck",
"namespace": "namespace",
"labels": map[string]interface{}{"duck": "all-species"},
},
"status": map[string]interface{}{
"decisions": []interface{}{
map[string]interface{}{
"clusterName": "staging-01",
},
map[string]interface{}{
"clusterName": "production-01",
},
},
},
},
}
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
terminalGenerators := map[string]generators.Generator{
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
"Git": generators.NewGitGenerator(mockServer),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build()),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
"PullRequest": generators.NewPullRequestGenerator(k8sClient),
}
nestedGenerators := map[string]generators.Generator{
"List": terminalGenerators["List"],
"Clusters": terminalGenerators["Clusters"],
"Git": terminalGenerators["Git"],
"SCMProvider": terminalGenerators["SCMProvider"],
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
"PullRequest": terminalGenerators["PullRequest"],
"Matrix": generators.NewMatrixGenerator(terminalGenerators),
"Merge": generators.NewMergeGenerator(terminalGenerators),
}
topLevelGenerators := map[string]generators.Generator{
"List": terminalGenerators["List"],
"Clusters": terminalGenerators["Clusters"],
"Git": terminalGenerators["Git"],
"SCMProvider": terminalGenerators["SCMProvider"],
"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
"PullRequest": terminalGenerators["PullRequest"],
"Matrix": generators.NewMatrixGenerator(nestedGenerators),
"Merge": generators.NewMergeGenerator(nestedGenerators),
}
client := fake.NewClientBuilder().WithScheme(scheme).Build()
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(0),
Generators: topLevelGenerators,
}
type args struct {
appset *argoappsetv1alpha1.ApplicationSet
}
tests := []struct {
name string
args args
want time.Duration
wantErr assert.ErrorAssertionFunc
}{
{name: "Cluster", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{Clusters: &argoappsetv1alpha1.ClusterGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "ClusterMergeNested", args: args{&argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
{Merge: &argoappsetv1alpha1.MergeGenerator{
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
Git: &argoappsetv1alpha1.GitGenerator{},
},
},
}},
},
},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ClusterMatrixNested", args: args{&argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{
{Clusters: &argoappsetv1alpha1.ClusterGenerator{}},
{Matrix: &argoappsetv1alpha1.MatrixGenerator{
Generators: []argoappsetv1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argoappsetv1alpha1.ClusterGenerator{},
Git: &argoappsetv1alpha1.GitGenerator{},
},
},
}},
},
},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ListGenerator", args: args{appset: &argoappsetv1alpha1.ApplicationSet{
Spec: argoappsetv1alpha1.ApplicationSetSpec{
Generators: []argoappsetv1alpha1.ApplicationSetGenerator{{List: &argoappsetv1alpha1.ListGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
})
}
}
type argoCDServiceMock struct {
mock *mock.Mock
}
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
args := a.mock.Called(ctx, repoURL, revision)
return args.Get(0).([]string), args.Error(1)
}
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
args := a.mock.Called(ctx, repoURL, revision, pattern)
return args.Get(0).(map[string][]byte), args.Error(1)
}
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
args := a.mock.Called(ctx, repoURL, revision, path)
return args.Get(0).([]byte), args.Error(1)
}
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
args := a.mock.Called(ctx, repoURL, revision)
return args.Get(0).([]string), args.Error(1)
}

View File

@@ -1,19 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- clusters: {}
template:
metadata:
name: '{{name}}-guestbook'
spec:
project: "default"
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
targetRevision: HEAD
path: guestbook
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,57 +0,0 @@
# How the Cluster Decision Resource (clusterDecisionResource) generator works
1. The Cluster Decision Resource generator reads a configurable status format:
```yaml
status:
clusters:
- name: cluster-01
- name: cluster-02
```
This is a common status format. Another format that could be read looks like this:
```yaml
status:
decisions:
- clusterName: cluster-01
namespace: cluster-01
- clusterName: cluster-02
namespace: cluster-02
```
2. Any resource whose status contains a list of key/value pairs, where the values match Argo CD cluster names, can be used.
3. The key/value pairs found in each element of the list are made available to the template. In addition, `name` and `server` are still available to the template.
4. The Service Account used by the ApplicationSet controller must have permission to `Get` the resource you want to retrieve the duck-type definition from.
5. A ConfigMap is used to identify the resource whose status is read to obtain the Argo CD cluster list. You can use multiple resources by creating one ConfigMap for each of them in the Argo CD namespace. The ConfigMap keys are described below, followed by a sketch of a matching resource.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: my-configmap
data:
apiVersion: group.io/v1
kind: mykinds
statusListKey: clusters
matchKey: name
```
* `apiVersion` - This is the apiVersion of your resource
* `kind` - This is the kind of your resource, given in its plural (resource) form
* `statusListKey` - This is the key in your resource's status that holds the list of Argo CD clusters. Defaults to `clusters`.
* `matchKey` - This is the key name found in each element of the cluster list; `name` and `clusterName` are the keys in the examples above.
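For illustration only, a resource that satisfies the `my-configmap` ConfigMap above might look like the sketch below. The kind `MyKind`, the resource name, the namespace, and the cluster names are placeholders; the cluster names must match clusters already registered in Argo CD.
```yaml
apiVersion: group.io/v1
kind: MyKind                 # hypothetical kind; the ConfigMap above refers to it by its plural form (mykinds)
metadata:
  name: my-cluster-decision  # placeholder name
  namespace: argocd          # assumed to be the namespace watched by the ApplicationSet controller
status:
  clusters:                  # statusListKey from the ConfigMap above
  - name: cluster-01         # matchKey values; each must equal the name of a cluster registered in Argo CD
  - name: cluster-02
```
Each element of the list (here just `name`) is then exposed as a parameter to `spec.template`, together with the `name` and `server` of the matched Argo CD cluster.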
# Applying the example
1. Connect to a cluster with the ApplicationSet controller running
2. Edit the Role for the ApplicationSet service account and grant it permission to `list` the `placementdecisions` resources in the `cluster.open-cluster-management.io/v1alpha1` apiGroup:
```yaml
- apiGroups:
- "cluster.open-cluster-management.io/v1alpha1"
resources:
- placementdecisions
verbs:
- list
```
3. Apply the following controller and its associated ManagedCluster CRDs:
https://github.com/open-cluster-management/placement
4. Now apply the PlacementDecision and an ApplicationSet:
```bash
kubectl apply -f ./placementdecision.yaml
kubectl apply -f ./configMap.yaml
kubectl apply -f ./ducktype-example.yaml
```
5. For now this won't do anything until you create a controller that populates the `Status.Decisions` array.

View File

@@ -1,11 +0,0 @@
# Generating a Status.Decisions from this CRD requires https://github.com/open-cluster-management/multicloud-operators-placementrule to be deployed
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ocm-placement
data:
apiVersion: apps.open-cluster-management.io/v1
kind: placementrules
statusListKey: decisions
matchKey: clusterName

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: book-import
spec:
generators:
- clusterDecisionResource:
configMapRef: ocm-placement
name: test-placement
requeueAfterSeconds: 30
template:
metadata:
name: '{{clusterName}}-book-import'
spec:
project: "default"
source:
repoURL: https://github.com/open-cluster-management/application-samples.git
targetRevision: HEAD
path: book-import
destination:
name: '{{clusterName}}'
namespace: bookimport
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

View File

@@ -1,18 +0,0 @@
---
apiVersion: apps.open-cluster-management.io/v1
kind: PlacementRule
metadata:
name: test-placement
spec:
clusterReplicas: 1 # Availability choice, maximum number of clusters to provision at once
clusterSelector:
matchLabels:
'usage': 'development'
clusterConditions:
- type: ManagedClusterConditionAvailable
status: "True"
# Below is sample output the generator can consume.
status:
decisions:
- clusterName: cluster-01
- clusterName: cluster-02

View File

@@ -1,22 +0,0 @@
# This is an example of a typical ApplicationSet which uses the cluster generator.
# An ApplicationSet is composed of two stanzas:
# - spec.generators - produces a list of values supplied as arguments to the app template
# - spec.template - an application template, which has been parameterized
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- clusters: {}
template:
metadata:
name: '{{name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
chart: guestbook
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,33 +0,0 @@
# The cluster generator produces an items list from all clusters registered to Argo CD.
# It automatically provides the following fields as values to the app template:
# - name
# - server
# - metadata.labels.<key>
# - metadata.annotations.<key>
# - values.<key>
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- clusters:
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
values:
project: default
template:
metadata:
name: '{{name}}-guestbook'
labels:
environment: '{{metadata.labels.environment}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
chart: guestbook
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,44 +0,0 @@
# This example demonstrates the git directory generator, which produces an items list
# based on discovery of directories in a git repo matching a specified pattern.
# Git generators automatically provide {{path}} and {{path.basename}} as available
# variables to the app template.
#
# Suppose the following git directory structure (note the use of different config tools):
#
# cluster-deployments
# └── add-ons
# ├── argo-rollouts
# │   ├── all.yaml
# │   └── kustomization.yaml
# ├── argo-workflows
# │   └── install.yaml
# ├── grafana
# │   ├── Chart.yaml
# │   └── values.yaml
# └── prometheus-operator
# ├── Chart.yaml
# └── values.yaml
#
# The following ApplicationSet would produce four applications (in different namespaces),
# using the directory basename as both the namespace and application name.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-addons
spec:
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
directories:
- path: add-ons/*
template:
metadata:
name: '{{path.basename}}'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: http://kubernetes.default.svc
namespace: '{{path.basename}}'

View File

@@ -1,55 +0,0 @@
# This example demonstrates a git file generator which traverses the directory structure of a git
# repository to discover items based on a filename convention. For each file discovered, the
# contents of the discovered files themselves act as the set of inputs to the app template.
#
# Suppose the following git directory structure:
#
# cluster-deployments
# ├── apps
# │ └── guestbook
# │ └── install.yaml
# └── cluster-config
# ├── engineering
# │ ├── dev
# │ │ └── config.json
# │ └── prod
# │ └── config.json
# └── finance
# ├── dev
# │ └── config.json
# └── prod
# └── config.json
#
# The discovered files (e.g. config.json) can be any structured data supplied to the
# generated application, e.g.:
# {
# "aws_account": "123456",
# "asset_id": "11223344"
# "cluster": {
# "owner": "Jesse_Suen@intuit.com",
# "name": "engineering-dev",
# "address": "http://1.2.3.4"
# }
# }
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
files:
- path: "**/config.json"
template:
metadata:
name: '{{cluster.name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: apps/guestbook
destination:
server: '{{cluster.address}}'
namespace: guestbook

View File

@@ -1,68 +0,0 @@
# This example demonstrates a git file generator which produces its items based on one or
# more files referenced in a git repo. The referenced files would contain a json/yaml list of
# arbitrary structured objects. Each item of the list would become a set of parameters to a
# generated application.
#
# Suppose the following git directory structure:
#
# cluster-deployments
# ├── apps
# │ └── guestbook
# │ ├── v1.0
# │ │ └── install.yaml
# │ └── v2.0
# │ └── install.yaml
# └── config
# └── clusters.json
#
# In this example, the `clusters.json` file is a JSON list of structured data:
# [
# {
# "account": "123456",
# "asset_id": "11223344",
# "cluster": {
# "owner": "Jesse_Suen@intuit.com",
# "name": "engineering-dev",
# "address": "http://1.2.3.4"
# },
# "appVersions": {
# "prometheus-operator": "v0.38",
# "guestbook": "v2.0"
# }
# },
# {
# "account": "456789",
# "asset_id": "55667788",
# "cluster": {
# "owner": "Alexander_Matyushentsev@intuit.com",
# "name": "engineering-prod",
# "address": "http://2.4.6.8"
# },
# "appVersions": {
# "prometheus-operator": "v0.38",
# "guestbook": "v1.0"
# }
# }
# ]
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
files:
- path: config/clusters.json
template:
metadata:
name: '{{cluster.name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: apps/guestbook/{{appVersions.guestbook}}
destination:
server: http://kubernetes.default.svc
namespace: guestbook

View File

@@ -1,33 +0,0 @@
# The list generator specifies a literal list of argument values to the app spec template.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
values:
project: dev
- cluster: engineering-prod
url: https://2.4.6.8
values:
project: prod
- cluster: finance-preprod
url: https://9.8.7.6
values:
project: preprod
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: guestbook/{{cluster}}
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,3 +0,0 @@
# Proposal Examples
This directory contains examples that are not yet implemented.
They are part of the project to indicate planned future work, and we welcome any contribution that adds an implementation.

View File

@@ -1,48 +0,0 @@
# For all generators, filters can be applied to reduce the generated items to a smaller subset.
# A powerful set of filter expressions is supported, using syntax provided by the
# https://github.com/antonmedv/expr library. Example expressions are demonstrated below.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
# Match all clusters who meet ALL of the following conditions:
# 1. name matches the regex `sales-.*`
# 2. environment label is either 'staging' or 'prod'
- clusters:
filters:
- expr: '{{name}} matches "sales-.*"'
- expr: '{{metadata.labels.environment}} in [staging, prod]'
values:
version: '2.0.0'
# Filter items from `config/clusters.json` in the `cluster-deployments` git repo,
# to only those having the `cluster.enabled == true` property. e.g.:
# {
# ...
# "cluster": {
# "enabled": true,
# ...
# }
# }
- git:
repoURL: https://github.com/infra-team/cluster-deployments.git
files:
- path: config/clusters.json
filters:
- expr: '{{cluster.enabled}} == true'
template:
metadata:
name: '{{name}}-guestbook'
spec:
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: "{{values.version}}"
chart: guestbook
helm:
parameters:
- name: foo
value: "{{metadata.annotations.foo}}"
destination:
server: '{{server}}'
namespace: guestbook

View File

@@ -1,48 +0,0 @@
# App templates can also be defined as part of the generator's template stanza. Sometimes it is
# useful to do this in order to override the spec.template stanza, for example when simple string
# parameterization is insufficient. In the examples below, each generators[].XXX.template is
# a partial definition which overrides/patches the default template.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
template:
metadata: {}
spec:
project: "project"
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
path: '{{cluster}}-override'
destination: {}
- list:
elements:
- cluster: engineering-prod
url: https://1.2.3.4
template:
metadata: {}
spec:
project: "project2"
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
path: '{{cluster}}-override2'
destination: {}
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: "project"
source:
repoURL: https://github.com/infra-team/cluster-deployments.git
targetRevision: HEAD
path: guestbook/{{cluster}}
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,6 +0,0 @@
#namePrefix: kustomize-
resources:
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
version: 40.5.0
repository: https://prometheus-community.github.io/helm-charts

View File

@@ -1,6 +0,0 @@
#namePrefix: kustomize-
resources:
- https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/namespace-install.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,23 +0,0 @@
apiVersion: v2
name: helm-guestbook
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "helm-guestbook.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ template "helm-guestbook.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "helm-guestbook.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "helm-guestbook.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}

View File

@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "helm-guestbook.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "helm-guestbook.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "helm-guestbook.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@@ -1,52 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "helm-guestbook.fullname" . }}
labels:
app: {{ template "helm-guestbook.name" . }}
chart: {{ template "helm-guestbook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
revisionHistoryLimit: 3
selector:
matchLabels:
app: {{ template "helm-guestbook.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "helm-guestbook.name" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "helm-guestbook.fullname" . }}
labels:
app: {{ template "helm-guestbook.name" . }}
chart: {{ template "helm-guestbook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ template "helm-guestbook.name" . }}
release: {{ .Release.Name }}

View File

@@ -1,45 +0,0 @@
# Default values for helm-guestbook.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: gcr.io/heptio-images/ks-guestbook-demo
tag: 0.1
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
version: 40.5.0
repository: https://prometheus-community.github.io/helm-charts

View File

@@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-addons
namespace: argocd
spec:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/git-generator-directory/excludes/cluster-addons/*
- exclude: true
path: applicationset/examples/git-generator-directory/excludes/cluster-addons/exclude-helm-guestbook
template:
metadata:
name: '{{path.basename}}'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: https://kubernetes.default.svc
namespace: '{{path.basename}}'
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-addons
namespace: argocd
spec:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/git-generator-directory/cluster-addons/*
template:
metadata:
name: '{{path.basename}}'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: https://kubernetes.default.svc
namespace: '{{path.basename}}'
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,7 +0,0 @@
namePrefix: kustomize-
resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,9 +0,0 @@
{
"aws_account": "123456",
"asset_id": "11223344",
"cluster": {
"owner": "cluster-admin@company.com",
"name": "engineering-dev",
"address": "http://1.2.3.4"
}
}

View File

@@ -1,9 +0,0 @@
{
"aws_account": "123456",
"asset_id": "11223344",
"cluster": {
"owner": "cluster-admin@company.com",
"name": "engineering-prod",
"address": "http://1.2.3.4"
}
}

View File

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
files:
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
template:
metadata:
name: '{{cluster.name}}-guestbook'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: "applicationset/examples/git-generator-files-discovery/apps/guestbook"
destination:
server: https://kubernetes.default.svc
#server: '{{cluster.address}}'
namespace: guestbook
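For orientation, the git files generator flattens each matched config.json into dotted template parameters. Based on the engineering-dev file shown above, the parameter set should look roughly like this (a sketch, not controller output):

  cluster.owner:   cluster-admin@company.com
  cluster.name:    engineering-dev
  cluster.address: http://1.2.3.4
  aws_account:     123456
  asset_id:        11223344

which renders the template's name field as engineering-dev-guestbook.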

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
- cluster: engineering-prod
url: https://kubernetes.default.svc
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: applicationset/examples/list-generator/guestbook/{{cluster}}
destination:
server: '{{url}}'
namespace: guestbook

View File

@@ -1,6 +0,0 @@
#namePrefix: kustomize-
resources:
- namespace-install.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,417 +0,0 @@
# This is an auto-generated file. DO NOT EDIT
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterworkflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: ClusterWorkflowTemplate
listKind: ClusterWorkflowTemplateList
plural: clusterworkflowtemplates
shortNames:
- clusterwftmpl
- cwft
singular: clusterworkflowtemplate
scope: Cluster
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cronworkflows.argoproj.io
spec:
group: argoproj.io
names:
kind: CronWorkflow
listKind: CronWorkflowList
plural: cronworkflows
shortNames:
- cwf
- cronwf
singular: cronworkflow
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workfloweventbindings.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowEventBinding
listKind: WorkflowEventBindingList
plural: workfloweventbindings
shortNames:
- wfeb
singular: workfloweventbinding
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflows.argoproj.io
spec:
additionalPrinterColumns:
- JSONPath: .status.phase
description: Status of the workflow
name: Status
type: string
- JSONPath: .status.startedAt
description: When the workflow was started
format: date-time
name: Age
type: date
group: argoproj.io
names:
kind: Workflow
listKind: WorkflowList
plural: workflows
shortNames:
- wf
singular: workflow
scope: Namespaced
subresources: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowTemplate
listKind: WorkflowTemplateList
plural: workflowtemplates
shortNames:
- wftmpl
singular: workflowtemplate
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-role
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- get
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- create
- apiGroups:
- argoproj.io
resources:
- workflowtemplates
- workflowtemplates/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- argoproj.io
resources:
- cronworkflows
- cronworkflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-server-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- watch
- create
- patch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- apiGroups:
- argoproj.io
resources:
- workflows
- workfloweventbindings
- workflowtemplates
- cronworkflows
- cronworkflows/finalizers
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-role
subjects:
- kind: ServiceAccount
name: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-server-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-server-role
subjects:
- kind: ServiceAccount
name: argo-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
---
apiVersion: v1
kind: Service
metadata:
name: argo-server
spec:
ports:
- name: web
port: 2746
targetPort: 2746
selector:
app: argo-server
---
apiVersion: v1
kind: Service
metadata:
name: workflow-controller-metrics
spec:
ports:
- name: metrics
port: 9090
protocol: TCP
targetPort: 9090
selector:
app: workflow-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: argo-server
spec:
selector:
matchLabels:
app: argo-server
template:
metadata:
labels:
app: argo-server
spec:
containers:
- args:
- server
- --namespaced
image: argoproj/argocli:v2.12.5
name: argo-server
ports:
- containerPort: 2746
name: web
readinessProbe:
httpGet:
path: /
port: 2746
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 20
volumeMounts:
- mountPath: /tmp
name: tmp
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
serviceAccountName: argo-server
volumes:
- emptyDir: {}
name: tmp
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: workflow-controller
spec:
selector:
matchLabels:
app: workflow-controller
template:
metadata:
labels:
app: workflow-controller
spec:
containers:
- args:
- --configmap
- workflow-controller-configmap
- --executor-image
- argoproj/argoexec:v2.12.5
- --namespaced
command:
- workflow-controller
image: argoproj/workflow-controller:v2.12.5
livenessProbe:
httpGet:
path: /metrics
port: metrics
initialDelaySeconds: 30
periodSeconds: 30
name: workflow-controller
ports:
- containerPort: 9090
name: metrics
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
serviceAccountName: argo

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: helm-prometheus-operator
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"

View File

@@ -1,4 +0,0 @@
dependencies:
- name: kube-prometheus-stack
version: 9.4.10
repository: https://prometheus-community.github.io/helm-charts

View File

@@ -1,33 +0,0 @@
# This example demonstrates combining the git generator with a cluster generator.
# The expected output is one Application per combination of git directory and cluster (application_count = git directories * clusters); a short sketch of this combination step follows the manifest.
#
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: cluster-git
spec:
generators:
- matrix:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/matrix/cluster-addons/*
- clusters:
selector:
matchLabels:
argocd.argoproj.io/secret-type: cluster
template:
metadata:
name: '{{path.basename}}-{{name}}'
spec:
project: '{{metadata.labels.environment}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{server}}'
namespace: '{{path.basename}}'
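The application_count arithmetic above is just the cartesian product of the two child generators' parameter sets. Below is a minimal, self-contained Go sketch of that combination step; the directory and cluster names are hypothetical, and this simplifies rather than reproduces the controller's matrix implementation.

package main

import "fmt"

// params is one generator-produced parameter set, e.g. {"path.basename": "argo-workflows"}.
type params map[string]string

// cartesian pairs every parameter set from a with every set from b, the way the
// matrix generator multiplies its two child generators' outputs.
func cartesian(a, b []params) []params {
	out := make([]params, 0, len(a)*len(b))
	for _, pa := range a {
		for _, pb := range b {
			merged := params{}
			for k, v := range pa {
				merged[k] = v
			}
			for k, v := range pb {
				merged[k] = v
			}
			out = append(out, merged)
		}
	}
	return out
}

func main() {
	gitDirs := []params{{"path.basename": "argo-workflows"}, {"path.basename": "prometheus-operator"}} // hypothetical directories
	clusters := []params{{"name": "staging", "server": "https://1.2.3.4"}, {"name": "prod", "server": "https://2.4.6.8"}} // hypothetical clusters
	for _, p := range cartesian(gitDirs, clusters) {
		fmt.Printf("%s on %s (%s)\n", p["path.basename"], p["name"], p["server"])
	}
	// 2 directories x 2 clusters = 4 parameter sets, hence 4 Applications.
}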

View File

@@ -1,39 +0,0 @@
# This example demonstrates combining the git generator with a list generator.
# The expected output is one Application per combination of git directory and list entry (application_count = git directories * list entries); a worked example follows the manifest.
#
#
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: list-git
spec:
generators:
- matrix:
generators:
- git:
repoURL: https://github.com/argoproj/argo-cd.git
revision: HEAD
directories:
- path: applicationset/examples/matrix/cluster-addons/*
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
values:
project: dev
- cluster: engineering-prod
url: https://2.4.6.8
values:
project: prod
template:
metadata:
name: '{{path.basename}}-{{cluster}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{url}}'
namespace: '{{path.basename}}'
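As a worked illustration with hypothetical directory names (say argo-workflows and prometheus-operator under cluster-addons), 2 directories x 2 list entries render four Applications from the template above:

  argo-workflows-engineering-dev        project: dev   destination: https://1.2.3.4
  prometheus-operator-engineering-dev   project: dev   destination: https://1.2.3.4
  argo-workflows-engineering-prod       project: prod  destination: https://2.4.6.8
  prometheus-operator-engineering-prod  project: prod  destination: https://2.4.6.8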

View File

@@ -1,37 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: list-and-list
namespace: argocd
spec:
generators:
- matrix:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
values:
project: default
- cluster: engineering-prod
url: https://kubernetes.default.svc
values:
project: default
- list:
elements:
- values:
suffix: '1'
- values:
suffix: '2'
template:
metadata:
name: '{{cluster}}-{{values.suffix}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{url}}'
namespace: '{{path.basename}}'

View File

@@ -1,67 +0,0 @@
# The matrix generator can contain other combination-type generators (matrix and union). But nested matrix and union
# generators cannot contain further-nested matrix or union generators.
#
# The generators are evaluated from most-nested to least-nested. In this case:
# 1. The union generator joins two lists to make 3 parameter sets.
# 2. The inner matrix generator takes the cartesian product of the two lists to make 4 parameter sets.
# 3. The outer matrix generator takes the cartesian product of the 3 union and the 4 inner matrix parameter sets to
# make 3*4=12 final parameter sets.
# 4. The 12 final parameter sets are evaluated against the top-level template to generate 12 Applications.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: matrix-and-union-in-matrix
spec:
generators:
- matrix:
generators:
- union:
mergeKeys:
- cluster
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
values:
project: default
- cluster: engineering-prod
url: https://kubernetes.default.svc
values:
project: default
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
values:
project: default
- cluster: engineering-test
url: https://kubernetes.default.svc
values:
project: default
- matrix:
generators:
- list:
elements:
- values:
suffix: '1'
- values:
suffix: '2'
- list:
elements:
- values:
prefix: 'first'
- values:
prefix: 'second'
template:
metadata:
name: '{{values.prefix}}-{{cluster}}-{{values.suffix}}'
spec:
project: '{{values.project}}'
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: '{{path}}'
destination:
server: '{{url}}'
namespace: '{{path.basename}}'
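Tracing that counting against the generators above (a worked illustration only; nothing here beyond what the manifest implies):

  union (mergeKeys: cluster):      engineering-dev, engineering-prod, engineering-test        -> 3 parameter sets
  inner matrix (suffix x prefix):  (1,first) (1,second) (2,first) (2,second)                  -> 4 parameter sets
  outer matrix:                    3 x 4 = 12 Applications, named from first-engineering-dev-1
                                   through second-engineering-test-2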

View File

@@ -1,44 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: merge-clusters-and-list
spec:
generators:
- merge:
mergeKeys:
- server
generators:
- clusters:
values:
kafka: 'true'
redis: 'false'
        # For clusters with a specific label, disable Kafka.
- clusters:
selector:
matchLabels:
use-kafka: 'false'
values:
kafka: 'false'
# For a specific cluster, enable Redis.
- list:
elements:
- server: https://some-specific-cluster
values.redis: 'true'
template:
metadata:
name: '{{name}}'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
targetRevision: HEAD
path: helm-guestbook
helm:
parameters:
- name: kafka
value: '{{values.kafka}}'
- name: redis
value: '{{values.redis}}'
destination:
server: '{{server}}'
namespace: default
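The two comments above hinge on how the merge generator matches parameter sets on the mergeKeys. The Go sketch below is a rough model of that behaviour, not the controller's implementation, and the cluster names are hypothetical: later generators only override base sets whose server value matches, and overlay sets with no matching base are dropped.

package main

import "fmt"

type params map[string]string

// mergeByServer overlays later parameter sets onto the base sets emitted by the
// first generator, matching on the "server" merge key; overlay sets without a
// matching base set are ignored. Rough sketch only.
func mergeByServer(base []params, overlays ...[]params) []params {
	for _, overlay := range overlays {
		for _, o := range overlay {
			for _, b := range base {
				if b["server"] == o["server"] {
					for k, v := range o {
						b[k] = v
					}
				}
			}
		}
	}
	return base
}

func main() {
	base := []params{ // what the first clusters generator might emit (hypothetical clusters)
		{"name": "in-cluster", "server": "https://kubernetes.default.svc", "values.kafka": "true", "values.redis": "false"},
		{"name": "edge", "server": "https://some-specific-cluster", "values.kafka": "true", "values.redis": "false"},
	}
	noKafka := []params{{"server": "https://some-specific-cluster", "values.kafka": "false"}} // clusters labelled use-kafka: 'false'
	redisOn := []params{{"server": "https://some-specific-cluster", "values.redis": "true"}}  // the list generator element
	for _, p := range mergeByServer(base, noKafka, redisOn) {
		fmt.Println(p["name"], "kafka:", p["values.kafka"], "redis:", p["values.redis"])
	}
	// in-cluster keeps kafka=true, redis=false; the specific cluster ends up with kafka=false, redis=true.
}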

View File

@@ -1,43 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: merge-two-matrixes
spec:
generators:
- merge:
mergeKeys:
- server
- environment
generators:
- matrix:
generators:
- clusters:
values:
replicaCount: '2'
- list:
elements:
- environment: staging
namespace: guestbook-non-prod
- environment: prod
namespace: guestbook
- list:
elements:
- server: https://kubernetes.default.svc
environment: staging
values.replicaCount: '1'
template:
metadata:
name: '{{name}}-guestbook-{{environment}}'
spec:
project: default
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
targetRevision: HEAD
path: helm-guestbook
helm:
parameters:
- name: replicaCount
value: '{{values.replicaCount}}'
destination:
server: '{{server}}'
namespace: '{{namespace}}'

View File

@@ -1,40 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: myapp
spec:
generators:
- pullRequest:
github:
# The GitHub organization or user.
owner: myorg
        # The GitHub repository.
repo: myrepo
# For GitHub Enterprise. (optional)
api: https://git.example.com/
# Reference to a Secret containing an access token. (optional)
tokenRef:
secretName: github-token
key: token
        # Labels are used to filter the PRs that you want to target. (optional)
labels:
- preview
template:
metadata:
name: 'myapp-{{ branch }}-{{ number }}'
spec:
source:
repoURL: 'https://github.com/myorg/myrepo.git'
targetRevision: '{{ head_sha }}'
path: helm-guestbook
helm:
parameters:
- name: "image.tag"
value: "pull-{{ head_sha }}"
project: default
destination:
server: https://kubernetes.default.svc
namespace: "{{ branch }}-{{ number }}"
syncPolicy:
syncOptions:
- CreateNamespace=true
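For a hypothetical open pull request, number 42 from branch feature-x at head commit 1a2b3c4 and carrying the preview label, the template above would render roughly:

  metadata:
    name: myapp-feature-x-42
  spec:
    source:
      repoURL: 'https://github.com/myorg/myrepo.git'
      targetRevision: 1a2b3c4
      path: helm-guestbook
      helm:
        parameters:
        - name: image.tag
          value: pull-1a2b3c4
    destination:
      server: https://kubernetes.default.svc
      namespace: feature-x-42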

View File

@@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- scmProvider:
github:
organization: argoproj
cloneProtocol: https
filters:
- repositoryMatch: example-apps
template:
metadata:
name: '{{ repository }}-guestbook'
spec:
project: "default"
source:
repoURL: '{{ url }}'
targetRevision: '{{ branch }}'
path: guestbook
destination:
server: https://kubernetes.default.svc
namespace: guestbook

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,7 +0,0 @@
namePrefix: kustomize-
resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui

View File

@@ -1,7 +0,0 @@
namePrefix: kustomize-
resources:
- guestbook-ui-deployment.yaml
- guestbook-ui-svc.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View File

@@ -1,36 +0,0 @@
# App templates can also be defined as part of the generator's template stanza. This is
# useful for overriding the top-level spec.template stanza when simple string
# parameterization is insufficient. In the example below, the generators[].XXX.template is
# a partial definition that overrides/patches the default template; a sketch of the resulting Application follows the manifest.
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://kubernetes.default.svc
template:
metadata: {}
spec:
project: "default"
source:
targetRevision: HEAD
repoURL: https://github.com/argoproj/argo-cd.git
path: 'applicationset/examples/template-override/{{cluster}}-override'
destination: {}
template:
metadata:
name: '{{cluster}}-guestbook'
spec:
project: "default"
source:
repoURL: https://github.com/argoproj/argo-cd.git
targetRevision: HEAD
path: applicationset/examples/template-override/default
destination:
server: '{{url}}'
namespace: guestbook
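Assuming the usual patch semantics (non-empty fields of the generator-level template take precedence over the top-level template), the engineering-dev element above would yield roughly this Application: everything comes from the default template except the source fields, whose override resolves the path to the engineering-dev-override directory.

  metadata:
    name: engineering-dev-guestbook
  spec:
    project: default
    source:
      repoURL: https://github.com/argoproj/argo-cd.git
      targetRevision: HEAD
      path: applicationset/examples/template-override/engineering-dev-override
    destination:
      server: https://kubernetes.default.svc
      namespace: guestbook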

View File

@@ -1,186 +0,0 @@
package generators
import (
"context"
"fmt"
"regexp"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/argoproj/argo-cd/v2/util/settings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/applicationset/v1alpha1"
)
const (
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
ArgoCDSecretTypeCluster = "cluster"
)
var _ Generator = (*ClusterGenerator)(nil)
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
type ClusterGenerator struct {
client.Client
ctx context.Context
clientset kubernetes.Interface
// namespace is the Argo CD namespace
namespace string
settingsManager *settings.SettingsManager
}
func NewClusterGenerator(c client.Client, ctx context.Context, clientset kubernetes.Interface, namespace string) Generator {
settingsManager := settings.NewSettingsManager(ctx, clientset, namespace)
g := &ClusterGenerator{
Client: c,
ctx: ctx,
clientset: clientset,
namespace: namespace,
settingsManager: settingsManager,
}
return g
}
// GetRequeueAfter never requeues the cluster generator because the `clusterSecretEventHandler` will requeue the appsets
// when the cluster secrets change
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
return NoRequeueAfter
}
func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) *argoappsetv1alpha1.ApplicationSetTemplate {
return &appSetGenerator.Clusters.Template
}
func (g *ClusterGenerator) GenerateParams(
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, _ *argoappsetv1alpha1.ApplicationSet) ([]map[string]string, error) {
if appSetGenerator == nil {
return nil, EmptyAppSetGeneratorError
}
if appSetGenerator.Clusters == nil {
return nil, EmptyAppSetGeneratorError
}
// Do not include the local cluster in the cluster parameters IF there is a non-empty selector
// - Since local clusters do not have secrets, they do not have labels to match against
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
	// ListClusters from Argo CD's util/db package will include the local cluster in the list of clusters
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
if err != nil {
return nil, err
}
if clustersFromArgoCD == nil {
return nil, nil
}
clusterSecrets, err := g.getSecretsByClusterName(appSetGenerator)
if err != nil {
return nil, err
}
res := []map[string]string{}
secretsFound := []corev1.Secret{}
for _, cluster := range clustersFromArgoCD.Items {
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
// handled by the next step.
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
secretsFound = append(secretsFound, secretForCluster)
} else if !ignoreLocalClusters {
// If there is no secret for the cluster, it's the local cluster, so handle it here.
params := map[string]string{}
params["name"] = cluster.Name
params["server"] = cluster.Server
for key, value := range appSetGenerator.Clusters.Values {
params[fmt.Sprintf("values.%s", key)] = value
}
log.WithField("cluster", "local cluster").Info("matched local cluster")
res = append(res, params)
}
}
// For each matching cluster secret (non-local clusters only)
for _, cluster := range secretsFound {
params := map[string]string{}
params["name"] = string(cluster.Data["name"])
params["nameNormalized"] = sanitizeName(string(cluster.Data["name"]))
params["server"] = string(cluster.Data["server"])
for key, value := range cluster.ObjectMeta.Annotations {
params[fmt.Sprintf("metadata.annotations.%s", key)] = value
}
for key, value := range cluster.ObjectMeta.Labels {
params[fmt.Sprintf("metadata.labels.%s", key)] = value
}
for key, value := range appSetGenerator.Clusters.Values {
params[fmt.Sprintf("values.%s", key)] = value
}
log.WithField("cluster", cluster.Name).Info("matched cluster secret")
res = append(res, params)
}
return res, nil
}
func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
// List all Clusters:
clusterSecretList := &corev1.SecretList{}
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
return nil, err
}
if err := g.Client.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
return nil, err
}
	log.WithField("count", len(clusterSecretList.Items)).Debug("clusters matching labels")
res := map[string]corev1.Secret{}
for _, cluster := range clusterSecretList.Items {
clusterName := string(cluster.Data["name"])
res[clusterName] = cluster
}
return res, nil
}
// sanitizeName sanitizes the name in accordance with the following rules:
// 1. contain no more than 253 characters
// 2. contain only lowercase alphanumeric characters, '-' or '.'
// 3. start and end with an alphanumeric character
func sanitizeName(name string) string {
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
maxDNSNameLength := 253
name = strings.ToLower(name)
name = invalidDNSNameChars.ReplaceAllString(name, "-")
if len(name) > maxDNSNameLength {
name = name[:maxDNSNameLength]
}
return strings.Trim(name, "-.")
}
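To make the parameter shape concrete: a hypothetical cluster secret with data.name "Staging_Cluster", data.server "https://1.2.3.4", a label environment: staging, and a generator value project: default would produce roughly this parameter set, per GenerateParams above (note how sanitizeName lowercases the name and replaces the underscore):

  name:                        Staging_Cluster
  nameNormalized:              staging-cluster
  server:                      https://1.2.3.4
  metadata.labels.environment: staging
  values.project:              default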

Some files were not shown because too many files have changed in this diff.