mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-03-13 11:58:46 +01:00
Compare commits
269 Commits
release-1.
...
release-1.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
259926ad99 | ||
|
|
d0b54e6b8f | ||
|
|
6054497bbc | ||
|
|
3a2cceea4f | ||
|
|
e7d1553cfc | ||
|
|
3c629baf09 | ||
|
|
f46333ea82 | ||
|
|
5f897118e1 | ||
|
|
a94a8eb422 | ||
|
|
a37a50b808 | ||
|
|
468ed0a1c2 | ||
|
|
0fdef4861e | ||
|
|
eafbf929da | ||
|
|
a552df5625 | ||
|
|
2c551ee4be | ||
|
|
7af770b11f | ||
|
|
36bade7a2d | ||
|
|
5754bd3357 | ||
|
|
095c5d616b | ||
|
|
c3b3a0fc58 | ||
|
|
20d56730ff | ||
|
|
ffc99354d2 | ||
|
|
e845478a96 | ||
|
|
28dd7167d0 | ||
|
|
faee66888e | ||
|
|
fc753ac489 | ||
|
|
88a76022c1 | ||
|
|
31df9d11a9 | ||
|
|
7ed6c18762 | ||
|
|
81d5b13083 | ||
|
|
02624bf3a6 | ||
|
|
842a3d12f6 | ||
|
|
14f1725f53 | ||
|
|
9b142c799a | ||
|
|
d77072b534 | ||
|
|
8a77075cff | ||
|
|
f5b600d4af | ||
|
|
4ae70139d9 | ||
|
|
7660e40fdc | ||
|
|
310b40aa20 | ||
|
|
0b0c72a80b | ||
|
|
14188656c8 | ||
|
|
76bacfdea4 | ||
|
|
ee44e489b5 | ||
|
|
89774fef17 | ||
|
|
acc2369dc7 | ||
|
|
9de06e35eb | ||
|
|
4575adca86 | ||
|
|
ca42a375c2 | ||
|
|
0214eb8d92 | ||
|
|
949518e680 | ||
|
|
81e4bb1fef | ||
|
|
35b40cdb22 | ||
|
|
fa47fe00a2 | ||
|
|
743371ed4f | ||
|
|
75d9f23adb | ||
|
|
e05ebc4990 | ||
|
|
6ffd34dcf9 | ||
|
|
16c6eaf9ae | ||
|
|
e67a55463f | ||
|
|
476cb655b7 | ||
|
|
6c1ccf4d60 | ||
|
|
05f5a79923 | ||
|
|
c2c19f42ad | ||
|
|
3dbc330cf0 | ||
|
|
8ad928330f | ||
|
|
355e77e56f | ||
|
|
376d79a454 | ||
|
|
b74a9461ed | ||
|
|
b4236e1dc7 | ||
|
|
6ecc25edbd | ||
|
|
56ca1fb4ea | ||
|
|
3629346085 | ||
|
|
092072a281 | ||
|
|
e1142f9759 | ||
|
|
fbd3fe69ff | ||
|
|
f05f84979c | ||
|
|
3d6ff9e903 | ||
|
|
4c812576c1 | ||
|
|
6753fc9743 | ||
|
|
8d082cc46e | ||
|
|
f586385c8b | ||
|
|
466c73fa3b | ||
|
|
9e6c78d55c | ||
|
|
c6af4cca10 | ||
|
|
5448466ddc | ||
|
|
0eec2fee71 | ||
|
|
8a3b36bd28 | ||
|
|
35a7350b74 | ||
|
|
241e6d0a6e | ||
|
|
dbd6b406ac | ||
|
|
a069639135 | ||
|
|
76241f9d3b | ||
|
|
e5452ff70e | ||
|
|
9fdd782854 | ||
|
|
053ae28ed5 | ||
|
|
6eb4f41343 | ||
|
|
d9072d8200 | ||
|
|
238abbf771 | ||
|
|
aa4fb9ab4a | ||
|
|
55bc144410 | ||
|
|
c428e091ab | ||
|
|
c13bf422f8 | ||
|
|
b4bbe60b8c | ||
|
|
4fdf573fd1 | ||
|
|
d326daef62 | ||
|
|
98337065ae | ||
|
|
bdda410463 | ||
|
|
1b1df76ef2 | ||
|
|
7f40739b97 | ||
|
|
2faa5c89d1 | ||
|
|
9db879f68f | ||
|
|
e4235dabb8 | ||
|
|
ff07b112b1 | ||
|
|
53e618a4c0 | ||
|
|
a12b7bdb74 | ||
|
|
eae0527839 | ||
|
|
2d79dbb0bb | ||
|
|
a501cdbb56 | ||
|
|
d2c1821148 | ||
|
|
00d44910b8 | ||
|
|
9b21c25783 | ||
|
|
e1deca2a9e | ||
|
|
7ae204d426 | ||
|
|
62621428b1 | ||
|
|
6411958be5 | ||
|
|
ab1f9e4658 | ||
|
|
36d1b42d5c | ||
|
|
8b9d25f6e3 | ||
|
|
323af4d562 | ||
|
|
a946b70b5e | ||
|
|
a7f6866344 | ||
|
|
c71bfc62ba | ||
|
|
c4d6fde1c4 | ||
|
|
306a84193a | ||
|
|
b02f7f14a7 | ||
|
|
ac8ac14545 | ||
|
|
cdb8758b34 | ||
|
|
521f87fe5f | ||
|
|
27141ff083 | ||
|
|
7599516f68 | ||
|
|
e3a18b9cd7 | ||
|
|
eef35a32ab | ||
|
|
e26dace64d | ||
|
|
1520346369 | ||
|
|
702d4358d1 | ||
|
|
f9f1bdaabe | ||
|
|
e66b6109f7 | ||
|
|
0162971ea0 | ||
|
|
53897e5019 | ||
|
|
7e0d8a490c | ||
|
|
7fd7999e49 | ||
|
|
03f773d0ff | ||
|
|
c4bc740fb7 | ||
|
|
5934bc4699 | ||
|
|
3f0d26ec17 | ||
|
|
7665e58613 | ||
|
|
3684a10332 | ||
|
|
ab80a8126b | ||
|
|
6905196665 | ||
|
|
4e283c14fb | ||
|
|
1b5925a494 | ||
|
|
d500b27f1d | ||
|
|
868b4c4c7c | ||
|
|
4bbce1cb22 | ||
|
|
127f50d697 | ||
|
|
e51aab8d1f | ||
|
|
85a746f861 | ||
|
|
3c2be61827 | ||
|
|
42d572306d | ||
|
|
b3f8e7a02c | ||
|
|
476b09cbbf | ||
|
|
487d6647d5 | ||
|
|
0378819c54 | ||
|
|
bbb925cb63 | ||
|
|
9d1a378ce8 | ||
|
|
e2358cabc9 | ||
|
|
5cd12a3943 | ||
|
|
ebb06b8c89 | ||
|
|
d5d01eca3e | ||
|
|
e13bb79578 | ||
|
|
a8b6282b15 | ||
|
|
fc00d73cf5 | ||
|
|
303d46e67c | ||
|
|
a00798bc5e | ||
|
|
1c4a15129b | ||
|
|
0ca35ef26c | ||
|
|
b38a9aacb2 | ||
|
|
5b239fc1d1 | ||
|
|
fdf7566bb7 | ||
|
|
389858b6df | ||
|
|
20adad76ef | ||
|
|
f37ae1c1f6 | ||
|
|
6edd18bb89 | ||
|
|
4d23fe8108 | ||
|
|
7eeefb003c | ||
|
|
3ae5b2bfe4 | ||
|
|
cdebd26ab4 | ||
|
|
3a088c7c86 | ||
|
|
5a363e9d9f | ||
|
|
57ea24281c | ||
|
|
94d7c10baa | ||
|
|
28027897aa | ||
|
|
0c610f91e5 | ||
|
|
990d9ef92b | ||
|
|
6592773a35 | ||
|
|
beee4de10e | ||
|
|
7fde387dd6 | ||
|
|
64c8ac70fb | ||
|
|
f230df938e | ||
|
|
c9b0fdf1d7 | ||
|
|
ea57d15a80 | ||
|
|
ebc048167c | ||
|
|
3b8405a89b | ||
|
|
66d496d1ef | ||
|
|
9d71ae5ad6 | ||
|
|
85d660f0b9 | ||
|
|
d5286296eb | ||
|
|
916d4aed57 | ||
|
|
59d7b7d2b4 | ||
|
|
18c8716f0a | ||
|
|
a1afe44066 | ||
|
|
1695457f9c | ||
|
|
06bc4064c1 | ||
|
|
d67b4f6c36 | ||
|
|
d2ff5887ac | ||
|
|
205926fa80 | ||
|
|
c4dd9d19c2 | ||
|
|
74fe4af98e | ||
|
|
949808f0b2 | ||
|
|
9ef80ef1f7 | ||
|
|
1801212ac7 | ||
|
|
722d5b02d9 | ||
|
|
9f8505205f | ||
|
|
9e81c38c13 | ||
|
|
ff40297bdc | ||
|
|
c8d74d1a7f | ||
|
|
a3f8ec33f4 | ||
|
|
c7718242f9 | ||
|
|
57eeaa4231 | ||
|
|
7edcf47a03 | ||
|
|
f561f22caa | ||
|
|
dcea620ca6 | ||
|
|
9790a5da9c | ||
|
|
b1d281e7bb | ||
|
|
2e7fa935c4 | ||
|
|
8b69efcdb9 | ||
|
|
8b08a337c5 | ||
|
|
e22f946415 | ||
|
|
3c6715a6f9 | ||
|
|
4cf02fd813 | ||
|
|
17d217c2d6 | ||
|
|
32d5a05aef | ||
|
|
e3f3688227 | ||
|
|
ad715565a6 | ||
|
|
d7cad4ac6d | ||
|
|
cc6c67d343 | ||
|
|
6ada626dda | ||
|
|
5bc59003af | ||
|
|
fe583c2f5d | ||
|
|
539281f89e | ||
|
|
e5ea3fe1fb | ||
|
|
f36ea4646d | ||
|
|
ddcdbaa990 | ||
|
|
857ce87f00 | ||
|
|
4643a1c26d | ||
|
|
3bf8dc6fb0 | ||
|
|
f62559128d | ||
|
|
63fe5f32ba |
@@ -1,5 +1,15 @@
|
||||
version: 2.1
|
||||
commands:
|
||||
prepare_environment:
|
||||
steps:
|
||||
- run:
|
||||
name: Configure environment
|
||||
command: |
|
||||
set -x
|
||||
echo "export GOCACHE=/tmp/go-build-cache" | tee -a $BASH_ENV
|
||||
echo "export ARGOCD_TEST_VERBOSE=true" | tee -a $BASH_ENV
|
||||
echo "export ARGOCD_TEST_PARALLELISM=8" | tee -a $BASH_ENV
|
||||
echo "export ARGOCD_SONAR_VERSION=4.2.0.1873" | tee -a $BASH_ENV
|
||||
configure_git:
|
||||
steps:
|
||||
- run:
|
||||
@@ -14,55 +24,104 @@ commands:
|
||||
restore_vendor:
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: Restore vendor cache
|
||||
keys:
|
||||
- vendor-v1-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }}
|
||||
- vendor-v2-{{ checksum "Gopkg.lock" }}
|
||||
save_vendor:
|
||||
steps:
|
||||
- save_cache:
|
||||
key: vendor-v1-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }}
|
||||
name: Save vendor cache
|
||||
key: vendor-v2-{{ checksum "Gopkg.lock" }}
|
||||
paths:
|
||||
- ./vendor
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- vendor
|
||||
save_coverage_info:
|
||||
steps:
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- coverage.out
|
||||
save_node_modules:
|
||||
steps:
|
||||
- persist_to_workspace:
|
||||
root: ~/argo-cd
|
||||
paths:
|
||||
- ui/node_modules
|
||||
attach_vendor:
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: .
|
||||
install_golang:
|
||||
steps:
|
||||
- run:
|
||||
name: Install Golang v1.12.6
|
||||
name: Install Golang v1.14.1
|
||||
command: |
|
||||
go get golang.org/dl/go1.12.6
|
||||
[ -e /home/circleci/sdk/go1.12.6 ] || go1.12.6 download
|
||||
go get golang.org/dl/go1.14.1
|
||||
[ -e /home/circleci/sdk/go1.14.1 ] || go1.14.1 download
|
||||
go env
|
||||
echo "export GOPATH=/home/circleci/.go_workspace" | tee -a $BASH_ENV
|
||||
echo "export PATH=/home/circleci/sdk/go1.12.6/bin:\$PATH" | tee -a $BASH_ENV
|
||||
echo "export PATH=/home/circleci/sdk/go1.14.1/bin:\$PATH" | tee -a $BASH_ENV
|
||||
save_go_cache:
|
||||
steps:
|
||||
- save_cache:
|
||||
key: go-v1-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }}
|
||||
key: go-v2-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }}
|
||||
# https://circleci.com/docs/2.0/language-go/
|
||||
paths:
|
||||
- /home/circleci/.cache/go-build
|
||||
- /home/circleci/sdk/go1.12.6
|
||||
- /home/circleci/sdk/go1.14.1
|
||||
restore_go_cache:
|
||||
steps:
|
||||
- restore_cache:
|
||||
keys:
|
||||
- go-v1-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }}
|
||||
- go-v1-master-{{ .Environment.CIRCLE_JOB }}
|
||||
- go-v2-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }}
|
||||
|
||||
save_go_cache_docker:
|
||||
steps:
|
||||
- save_cache:
|
||||
name: Save Go build cache
|
||||
key: go-docker-v2-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}
|
||||
# https://circleci.com/docs/2.0/language-go/
|
||||
paths:
|
||||
- /tmp/go-build-cache
|
||||
restore_go_cache_docker:
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: Restore Go build cache
|
||||
keys:
|
||||
- go-docker-v2-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}
|
||||
jobs:
|
||||
codegen:
|
||||
build:
|
||||
docker:
|
||||
- image: circleci/golang:1.12
|
||||
- image: argoproj/argocd-test-tools:v0.5.0
|
||||
working_directory: /go/src/github.com/argoproj/argo-cd
|
||||
steps:
|
||||
- prepare_environment
|
||||
- checkout
|
||||
- restore_cache:
|
||||
keys:
|
||||
- codegen-v1-{{ checksum "Gopkg.lock" }}-{{ checksum "hack/installers/install-codegen-go-tools.sh" }}
|
||||
- run: ./hack/install.sh codegen-go-tools
|
||||
- run: sudo ./hack/install.sh codegen-tools
|
||||
- run: dep ensure -v
|
||||
- save_cache:
|
||||
key: codegen-v1-{{ checksum "Gopkg.lock" }}-{{ checksum "hack/installers/install-codegen-go-tools.sh" }}
|
||||
paths: [vendor, /tmp/dl, /go/pkg]
|
||||
- run: helm init --client-only
|
||||
- restore_vendor
|
||||
- run:
|
||||
name: Ensuring Gopkg.lock is up-to-date
|
||||
command: make dep-check-local
|
||||
- run:
|
||||
name: Syncing vendor dependencies
|
||||
command: dep ensure -v
|
||||
- run: make build-local
|
||||
- run: chmod -R 777 vendor
|
||||
- run: chmod -R 777 ${GOCACHE}
|
||||
- save_vendor
|
||||
- save_go_cache_docker
|
||||
|
||||
codegen:
|
||||
docker:
|
||||
- image: argoproj/argocd-test-tools:v0.5.0
|
||||
working_directory: /go/src/github.com/argoproj/argo-cd
|
||||
steps:
|
||||
- prepare_environment
|
||||
- checkout
|
||||
- restore_vendor
|
||||
- run: helm2 init --client-only
|
||||
- run: make codegen-local
|
||||
- run:
|
||||
name: Check nothing has changed
|
||||
@@ -77,42 +136,122 @@ jobs:
|
||||
path: codegen.patch
|
||||
destination: .
|
||||
test:
|
||||
working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
|
||||
machine:
|
||||
image: circleci/classic:201808-01
|
||||
working_directory: /go/src/github.com/argoproj/argo-cd
|
||||
docker:
|
||||
- image: argoproj/argocd-test-tools:v0.5.0
|
||||
steps:
|
||||
- restore_go_cache
|
||||
- install_golang
|
||||
- prepare_environment
|
||||
- checkout
|
||||
- restore_cache:
|
||||
key: test-dl-v1
|
||||
- run: sudo ./hack/install.sh kubectl-linux kubectx-linux dep-linux ksonnet-linux helm-linux kustomize-linux
|
||||
- save_cache:
|
||||
key: test-dl-v1
|
||||
paths: [/tmp/dl]
|
||||
- configure_git
|
||||
- run: go get github.com/jstemmer/go-junit-report
|
||||
- restore_vendor
|
||||
- run: dep ensure -v
|
||||
- run: make test
|
||||
- save_vendor
|
||||
- save_go_cache
|
||||
- restore_go_cache_docker
|
||||
- run: make test-local
|
||||
- run:
|
||||
name: Uploading code coverage
|
||||
command: bash <(curl -s https://codecov.io/bash) -f coverage.out
|
||||
- run:
|
||||
name: Output of test-results
|
||||
command: |
|
||||
ls -l test-results || true
|
||||
cat test-results/junit.xml || true
|
||||
- save_coverage_info
|
||||
- store_test_results:
|
||||
path: test-results
|
||||
- store_artifacts:
|
||||
path: test-results
|
||||
destination: .
|
||||
|
||||
lint:
|
||||
working_directory: /go/src/github.com/argoproj/argo-cd
|
||||
docker:
|
||||
- image: argoproj/argocd-test-tools:v0.5.0
|
||||
steps:
|
||||
- prepare_environment
|
||||
- checkout
|
||||
- configure_git
|
||||
- restore_vendor
|
||||
- run: dep ensure -v
|
||||
- restore_go_cache_docker
|
||||
- run:
|
||||
name: Run golangci-lint
|
||||
command: ARGOCD_LINT_GOGC=10 make lint-local
|
||||
- run:
|
||||
name: Check that nothing has changed
|
||||
command: |
|
||||
gDiff=$(git diff)
|
||||
if test "$gDiff" != ""; then
|
||||
echo
|
||||
echo "###############################################################################"
|
||||
echo "golangci-lint has made automatic corrections to your code. Please check below"
|
||||
echo "diff output and commit this to your local branch, or run make lint locally."
|
||||
echo "###############################################################################"
|
||||
echo
|
||||
git diff
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sonarcloud:
|
||||
working_directory: /go/src/github.com/argoproj/argo-cd
|
||||
docker:
|
||||
- image: argoproj/argocd-test-tools:v0.5.0
|
||||
environment:
|
||||
NODE_MODULES: /go/src/github.com/argoproj/argo-cd/ui/node_modules
|
||||
steps:
|
||||
- prepare_environment
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run:
|
||||
command: mkdir -p /tmp/cache/scanner
|
||||
name: Create cache directory if it doesn't exist
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-sonarcloud-scanner-4.2.0.1873
|
||||
- run:
|
||||
command: |
|
||||
set -e
|
||||
VERSION=4.2.0.1873
|
||||
SONAR_TOKEN=$SONAR_TOKEN
|
||||
SCANNER_DIRECTORY=/tmp/cache/scanner
|
||||
export SONAR_USER_HOME=$SCANNER_DIRECTORY/.sonar
|
||||
OS="linux"
|
||||
echo $SONAR_USER_HOME
|
||||
|
||||
if [[ ! -x "$SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/bin/sonar-scanner" ]]; then
|
||||
curl -Ol https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$VERSION-$OS.zip
|
||||
unzip -qq -o sonar-scanner-cli-$VERSION-$OS.zip -d $SCANNER_DIRECTORY
|
||||
fi
|
||||
|
||||
chmod +x $SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/bin/sonar-scanner
|
||||
chmod +x $SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/jre/bin/java
|
||||
|
||||
# Workaround for a possible bug in CircleCI
|
||||
if ! echo $CIRCLE_PULL_REQUEST | grep https://github.com/argoproj; then
|
||||
unset CIRCLE_PULL_REQUEST
|
||||
unset CIRCLE_PULL_REQUESTS
|
||||
fi
|
||||
|
||||
# Explicitly set NODE_MODULES
|
||||
export NODE_MODULES=/go/src/github.com/argoproj/argo-cd/ui/node_modules
|
||||
export NODE_PATH=/go/src/github.com/argoproj/argo-cd/ui/node_modules
|
||||
|
||||
$SCANNER_DIRECTORY/sonar-scanner-$VERSION-$OS/bin/sonar-scanner
|
||||
name: SonarCloud
|
||||
- save_cache:
|
||||
key: v1-sonarcloud-scanner-4.2.0.1873
|
||||
paths:
|
||||
- /tmp/cache/scanner
|
||||
|
||||
e2e:
|
||||
working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
|
||||
machine:
|
||||
image: circleci/classic:201808-01
|
||||
image: ubuntu-1604:201903-01
|
||||
environment:
|
||||
ARGOCD_FAKE_IN_CLUSTER: "true"
|
||||
ARGOCD_SSH_DATA_PATH: "/tmp/argo-e2e/app/config/ssh"
|
||||
ARGOCD_TLS_DATA_PATH: "/tmp/argo-e2e/app/config/tls"
|
||||
ARGOCD_E2E_K3S: "true"
|
||||
steps:
|
||||
- run:
|
||||
name: Install and start K3S v0.5.0
|
||||
@@ -120,68 +259,60 @@ jobs:
|
||||
curl -sfL https://get.k3s.io | sh -
|
||||
sudo chmod -R a+rw /etc/rancher/k3s
|
||||
kubectl version
|
||||
background: true
|
||||
environment:
|
||||
INSTALL_K3S_EXEC: --docker
|
||||
INSTALL_K3S_VERSION: v0.5.0
|
||||
- restore_go_cache
|
||||
- install_golang
|
||||
- prepare_environment
|
||||
- restore_go_cache_docker
|
||||
- checkout
|
||||
- restore_cache:
|
||||
keys: [e2e-dl-v1]
|
||||
- run: sudo ./hack/install.sh kubectx-linux dep-linux ksonnet-linux helm-linux kustomize-linux
|
||||
- run: go get github.com/jstemmer/go-junit-report
|
||||
keys: [e2e-dl-v2]
|
||||
- run: sudo ./hack/install.sh dep-linux
|
||||
- save_cache:
|
||||
key: e2e-dl-v10
|
||||
key: e2e-dl-v2
|
||||
paths: [/tmp/dl]
|
||||
- restore_vendor
|
||||
- attach_vendor
|
||||
- run: dep ensure -v
|
||||
- configure_git
|
||||
- run: make cli
|
||||
- run:
|
||||
name: Create namespace
|
||||
name: Update kubectl configuration for container
|
||||
command: |
|
||||
set -x
|
||||
kubectl create ns argocd-e2e
|
||||
kubens argocd-e2e
|
||||
# install the certificates (not 100% sure we need this)
|
||||
sudo cp /var/lib/rancher/k3s/server/tls/token-ca.crt /usr/local/share/ca-certificates/k3s.crt
|
||||
sudo update-ca-certificates
|
||||
# create the kubecfg, again - not sure we need this
|
||||
cat /etc/rancher/k3s/k3s.yaml | sed "s/localhost/`hostname`/" | tee ~/.kube/config
|
||||
echo "127.0.0.1 `hostname`" | sudo tee -a /etc/hosts
|
||||
- run:
|
||||
name: Apply manifests
|
||||
command: kustomize build test/manifests/base | kubectl apply -f -
|
||||
- run:
|
||||
name: Start Redis
|
||||
command: docker run --rm --name argocd-redis -i -p 6379:6379 redis:5.0.3-alpine --save "" --appendonly no
|
||||
background: true
|
||||
- run:
|
||||
name: Start repo server
|
||||
command: go run ./cmd/argocd-repo-server/main.go --loglevel debug --redis localhost:6379
|
||||
background: true
|
||||
- run:
|
||||
name: Start API server
|
||||
command: go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:6379 --insecure --dex-server http://localhost:5556 --repo-server localhost:8081 --staticassets ../argo-cd-ui/dist/app
|
||||
background: true
|
||||
- run:
|
||||
name: Start Test Git
|
||||
command: |
|
||||
test/fixture/testrepos/start-git.sh
|
||||
background: true
|
||||
- run: until curl -v http://localhost:8080/healthz; do sleep 10; done
|
||||
- run:
|
||||
name: Start controller
|
||||
command: go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:6379 --repo-server localhost:8081 --kubeconfig ~/.kube/config
|
||||
background: true
|
||||
- run:
|
||||
command: PATH=dist:$PATH make test-e2e
|
||||
ipaddr=$(ifconfig $IFACE |grep "inet " | awk '{print $2}')
|
||||
if echo $ipaddr | grep -q 'addr:'; then
|
||||
ipaddr=$(echo $ipaddr | awk -F ':' '{print $2}')
|
||||
fi
|
||||
test -d $HOME/.kube || mkdir -p $HOME/.kube
|
||||
kubectl config view --raw | sed -e "s/127.0.0.1:6443/${ipaddr}:6443/g" -e "s/localhost:6443/${ipaddr}:6443/g" > $HOME/.kube/config
|
||||
environment:
|
||||
ARGOCD_OPTS: "--server localhost:8080 --plaintext"
|
||||
IFACE: ens4
|
||||
- run:
|
||||
name: Start E2E test server
|
||||
command: make start-e2e
|
||||
background: true
|
||||
environment:
|
||||
DOCKER_SRCDIR: /home/circleci/.go_workspace/src
|
||||
ARGOCD_E2E_TEST: "true"
|
||||
ARGOCD_IN_CI: "true"
|
||||
- run:
|
||||
name: Wait for API server to become available
|
||||
command: |
|
||||
count=1
|
||||
until curl -v http://localhost:8080/healthz; do
|
||||
sleep 10;
|
||||
if test $count -ge 60; then
|
||||
echo "Timeout"
|
||||
exit 1
|
||||
fi
|
||||
count=$((count+1))
|
||||
done
|
||||
- run:
|
||||
name: Run E2E tests
|
||||
command: |
|
||||
make test-e2e
|
||||
environment:
|
||||
ARGOCD_OPTS: "--plaintext"
|
||||
ARGOCD_E2E_K3S: "true"
|
||||
- save_vendor
|
||||
- save_go_cache
|
||||
IFACE: ens4
|
||||
DOCKER_SRCDIR: /home/circleci/.go_workspace/src
|
||||
- store_test_results:
|
||||
path: test-results
|
||||
- store_artifacts:
|
||||
@@ -205,13 +336,30 @@ jobs:
|
||||
- run: ./node_modules/.bin/codecov -p ..
|
||||
- run: NODE_ENV='production' yarn build
|
||||
- run: yarn lint
|
||||
- save_node_modules
|
||||
|
||||
orbs:
|
||||
sonarcloud: sonarsource/sonarcloud@1.0.1
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
workflow:
|
||||
jobs:
|
||||
- test
|
||||
- codegen
|
||||
- build
|
||||
- test:
|
||||
requires:
|
||||
- build
|
||||
- codegen:
|
||||
requires:
|
||||
- build
|
||||
- ui:
|
||||
requires:
|
||||
- codegen
|
||||
- e2e
|
||||
- build
|
||||
- sonarcloud:
|
||||
context: SonarCloud
|
||||
requires:
|
||||
- test
|
||||
- ui
|
||||
- e2e:
|
||||
requires:
|
||||
- build
|
||||
|
||||
4
.github/ISSUE_TEMPLATE/bug_report.md
vendored
4
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -5,6 +5,10 @@ title: ''
|
||||
labels: 'bug'
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
If you are trying to resolve an environment-specific issue or have a one-off question about the edge case that does not require a feature then please consider asking a
|
||||
question in argocd slack [channel](https://argoproj.github.io/community/join-slack).
|
||||
|
||||
Checklist:
|
||||
|
||||
* [ ] I've searched in the docs and FAQ for my answer: http://bit.ly/argocd-faq.
|
||||
|
||||
2
.github/pull_request_template.md
vendored
2
.github/pull_request_template.md
vendored
@@ -3,5 +3,5 @@ Checklist:
|
||||
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
|
||||
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
|
||||
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
|
||||
* [ ] Optional. My organization is added to the README.
|
||||
* [ ] Optional. My organization is added to USERS.md.
|
||||
* [ ] I've signed the CLA and my build is green ([troubleshooting builds](https://argoproj.github.io/argo-cd/developer-guide/ci/)).
|
||||
|
||||
2
.github/workflows/gh-pages.yaml
vendored
2
.github/workflows/gh-pages.yaml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
python-version: 3.x
|
||||
- name: build
|
||||
run: |
|
||||
pip install mkdocs mkdocs_material
|
||||
pip install mkdocs==1.0.4 mkdocs_material==4.1.1
|
||||
mkdocs build
|
||||
mkdir ./site/.circleci && echo '{version: 2, jobs: {build: {branches: {ignore: gh-pages}}}}' > ./site/.circleci/config.yml
|
||||
- name: deploy
|
||||
|
||||
59
.github/workflows/image.yaml
vendored
Normal file
59
.github/workflows/image.yaml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GOPATH: /home/runner/work/argo-cd/argo-cd
|
||||
steps:
|
||||
- uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: '1.14.1'
|
||||
- uses: actions/checkout@master
|
||||
with:
|
||||
path: src/github.com/argoproj/argo-cd
|
||||
- uses: actions/cache@v1
|
||||
with:
|
||||
path: src/github.com/argoproj/argo-cd/vendor
|
||||
key: ${{ runner.os }}-go-dep-${{ hashFiles('**/Gopkg.lock') }}
|
||||
# download dependencies
|
||||
- run: mkdir -p $GITHUB_WORKSPACE/bin && curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
|
||||
working-directory: src/github.com/argoproj/argo-cd
|
||||
- run: $GOPATH/bin/dep ensure -v
|
||||
working-directory: ./src/github.com/argoproj/argo-cd
|
||||
|
||||
# get image tag
|
||||
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
|
||||
working-directory: ./src/github.com/argoproj/argo-cd
|
||||
id: image
|
||||
|
||||
# build
|
||||
- run: |
|
||||
docker images -a --format "{{.ID}}" | xargs -I {} docker rmi {}
|
||||
make image DEV_IMAGE=true DOCKER_PUSH=false IMAGE_NAMESPACE=docker.pkg.github.com/argoproj/argo-cd IMAGE_TAG=${{ steps.image.outputs.tag }}
|
||||
working-directory: ./src/github.com/argoproj/argo-cd
|
||||
|
||||
# publish
|
||||
- run: |
|
||||
docker login docker.pkg.github.com --username $USERNAME --password $PASSWORD
|
||||
docker push docker.pkg.github.com/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }}
|
||||
env:
|
||||
USERNAME: ${{ secrets.USERNAME }}
|
||||
PASSWORD: ${{ secrets.TOKEN }}
|
||||
|
||||
# deploy
|
||||
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
|
||||
env:
|
||||
TOKEN: ${{ secrets.TOKEN }}
|
||||
- run: |
|
||||
docker run -v $(pwd):/src -w /src --rm -t lyft/kustomizer:v3.3.0 kustomize edit set image argoproj/argocd=docker.pkg.github.com/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }}
|
||||
git config --global user.email 'ci@argoproj.com'
|
||||
git config --global user.name 'CI'
|
||||
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
|
||||
working-directory: argoproj-deployments/argocd
|
||||
# TODO: clean up old images once github supports it: https://github.community/t5/How-to-use-Git-and-GitHub/Deleting-images-from-Github-Package-Registry/m-p/41202/thread-id/9811
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -9,4 +9,6 @@ site/
|
||||
cmd/**/debug
|
||||
debug.test
|
||||
coverage.out
|
||||
test-results
|
||||
test-results
|
||||
.scannerwork
|
||||
.scratch
|
||||
|
||||
653
CHANGELOG.md
653
CHANGELOG.md
@@ -1,14 +1,424 @@
|
||||
# Changelog
|
||||
|
||||
## v1.3.0-rc2 (2019-10-23)
|
||||
## v1.5.3 (Unreleased)
|
||||
|
||||
- Issue #2339 - Controller should compare with latest git revision if app has changed (#2543)
|
||||
- Unknown child app should not affect app health (#2544)
|
||||
- Redact secrets in dex logs (#2538)
|
||||
- Allows Helm parameters that contains arrays or maps. (#2525)
|
||||
- Set cookie policy to SameSite=lax and httpOnly (#2498)
|
||||
This patch release introduces a set of enhancements and bug fixes. Here are the most notable changes:
|
||||
|
||||
## v1.3.0-rc1 (2019-10-16)
|
||||
#### Multiple Kustomize Versions
|
||||
|
||||
The bundled Kustomize version had been upgraded to v3.5.4. Argo CD allows changing bundled version using
|
||||
[custom image or init container](https://argoproj.github.io/argo-cd/operator-manual/custom_tools/).
|
||||
This [feature](https://argoproj.github.io/argo-cd/user-guide/kustomize/#custom-kustomize-versions)
|
||||
enables bundling multiple Kustomize versions at the same time and allows end-users to specify the required version per application.
|
||||
|
||||
#### Custom Root Path
|
||||
|
||||
The feature allows accessing Argo CD UI and API using a custom root path(for example https://myhostname/argocd).
|
||||
This enables running Argo CD behind a proxy that takes care of user authentication (such as Ambassador) or hosting
|
||||
multiple Argo CD instances using the same hostname. A set of bug fixes and enhancements had been implemented to make it easier.
|
||||
Use new `--rootpath` [flag](https://argoproj.github.io/argo-cd/operator-manual/ingress/#argocd-server-and-ui-root-path-v153) to enable the feature.
|
||||
|
||||
#### Login Rate Limiting
|
||||
|
||||
The feature prevents a built-in user password brute force attack and addresses the known
|
||||
[vulnerability](https://argoproj.github.io/argo-cd/security_considerations/#cve-2020-8827-insufficient-anti-automationanti-brute-force).
|
||||
|
||||
#### Settings Management Tools
|
||||
|
||||
A new set of [CLI commands](https://argoproj.github.io/argo-cd/operator-manual/troubleshooting/) that simplify configuring Argo CD.
|
||||
Using the CLI you can test settings changes offline without affecting a running Argo CD instance, and have the ability to troubleshoot diffing
|
||||
customizations, custom resource health checks, and more.
|
||||
|
||||
### Other
|
||||
|
||||
* New Project and Application CRD settings ([#2900](https://github.com/argoproj/argo-cd/issues/2900), [#2873](https://github.com/argoproj/argo-cd/issues/2873)) that allows customizing Argo CD behavior.
|
||||
* Upgraded Dex (v2.22.0) enables seamless [SSO integration](https://www.openshift.com/blog/openshift-authentication-integration-with-argocd) with Openshift.
|
||||
|
||||
|
||||
#### Enhancements
|
||||
|
||||
* feat: added --grpc-web-root-path for CLI. (#3483)
|
||||
* feat: limit the maximum number of concurrent login attempts (#3467)
|
||||
* feat: upgrade kustomize version to 3.5.4 (#3472)
|
||||
* feat: upgrade dex to 2.22.0 (#3468)
|
||||
* feat: support user specified account token ids (#3425)
|
||||
* feat: support separate Kustomize version per application (#3414)
|
||||
* feat: add support for dex prometheus metrics (#3249)
|
||||
* feat: add settings troubleshooting commands to the 'argocd-util' binary (#3398)
|
||||
* feat: Let user to define meaningful unique JWT token name (#3388)
|
||||
* feat: Display link between OLM ClusterServiceVersion and it's OperatorGroup (#3390)
|
||||
* feat: Introduce sync-option SkipDryRunOnMissingResource=true (#2873) (#3247)
|
||||
* feat: support normalizing CRD fields that use known built-in K8S types (#3357)
|
||||
* feat: Whitelisted namespace resources (#2900)
|
||||
|
||||
#### Bug Fixes
|
||||
|
||||
* fix: added path to cookie (#3501)
|
||||
* fix: 'argocd sync' does not take into account IgnoreExtraneous annotation (#3486)
|
||||
* fix: CLI renders flipped diff results (#3480)
|
||||
* fix: GetApplicationSyncWindows API should not validate project permissions (#3456)
|
||||
* fix: argocd-util kubeconfig should use RawRestConfig to export config (#3447)
|
||||
* fix: javascript error on accounts list page (#3453)
|
||||
* fix: support both <group>/<kind> as well as <kind> as a resource override key (#3433)
|
||||
* fix: Updating to jsonnet v1.15.0 fix issue #3277 (#3431)
|
||||
* fix for helm repo add with flag --insecure-skip-server-verification (#3420)
|
||||
* fix: app diff --local support for helm repo. #3151 (#3407)
|
||||
* fix: Syncing apps incorrectly states "app synced", but this is not true (#3286)
|
||||
* fix: for jsonnet when it is localed in nested subdirectory and uses import (#3372)
|
||||
* fix: Update 4.5.3 redis-ha helm manifest (#3370)
|
||||
* fix: return 401 error code if username does not exist (#3369)
|
||||
* fix: Do not panic while running hooks with short revision (#3368)
|
||||
|
||||
## v1.5.2 (2020-04-20)
|
||||
|
||||
#### Critical security fix
|
||||
|
||||
This release contains a critical security fix. Please refer to the
|
||||
[security document](https://argoproj.github.io/argo-cd/security_considerations/#CVE-2020-5260-possible-git-credential-leak)
|
||||
for more information.
|
||||
|
||||
**Upgrading is strongly recommended**
|
||||
|
||||
## v1.4.3 (2020-04-20)
|
||||
|
||||
#### Critical security fix
|
||||
|
||||
This release contains a critical security fix. Please refer to the
|
||||
[security document](https://argoproj.github.io/argo-cd/security_considerations/#CVE-2020-5260-possible-git-credential-leak)
|
||||
for more information.
|
||||
|
||||
## v1.5.1 (2020-04-06)
|
||||
|
||||
#### Bug Fixes
|
||||
|
||||
* fix: return 401 error code if username does not exist (#3369)
|
||||
* fix: Do not panic while running hooks with short revision (#3368)
|
||||
* fix: Increase HAProxy check interval to prevent intermittent failures (#3356)
|
||||
* fix: Helm v3 CRD are not deployed (#3345)
|
||||
|
||||
## v1.5.0 (2020-04-02)
|
||||
|
||||
#### Helm Integration Enhancements - Helm 3 Support And More
|
||||
|
||||
Introduced native support for Helm 3 charts. For backward compatibility, Helm 2 charts are still rendered using the Helm 2 CLI. Argo CD inspects the
|
||||
Charts.yaml file and chooses the right binary based on the `apiVersion` value.
|
||||
|
||||
The following enhancements were implemented in addition to Helm 3:
|
||||
* The `--api-version` flag is passed to the `helm template` command during manifest generation.
|
||||
* The `--set-file` flag can be specified in the application specification.
|
||||
* Fixed bug that prevents automatically update Helm chart when new version is published (#3193)
|
||||
|
||||
#### Better Performance and Improved Metrics
|
||||
|
||||
If you are running Argo CD instances with several hundred applications on it, you should see a
|
||||
huge performance boost and significantly less Kubernetes API server load.
|
||||
|
||||
The Argo CD controller Prometheus metrics have been reworked to enable a richer Grafana dashboard.
|
||||
The improved dashboard is available at [examples/dashboard.json](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard.json).
|
||||
You can set `ARGOCD_LEGACY_CONTROLLER_METRICS=true` environment variable and use [examples/dashboard-legacy.json](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard-legacy.json)
|
||||
to keep using old dashboard.
|
||||
|
||||
#### Local accounts
|
||||
|
||||
Local accounts had been introduced in addition to the `admin` user and SSO integration. The feature is useful for creating authentication
|
||||
tokens with limited permissions to automate Argo CD management. Local accounts could also be used by small teams when SSO integration is overkill.
|
||||
This enhancement also allows disabling the admin user and enforcing only SSO logins.
|
||||
|
||||
#### Redis HA Proxy mode
|
||||
|
||||
As part of this release, the bundled Redis was upgraded to version 4.3.4 with enabled HAProxy.
|
||||
The HA proxy replaced the sentinel and provides more reliable Redis connection.
|
||||
|
||||
> After publishing 1.5.0 release we've discovered that default HAProxy settings might cause intermittent failures.
|
||||
> See [argo-cd#3358](https://github.com/argoproj/argo-cd/issues/3358)
|
||||
|
||||
#### Windows CLI
|
||||
|
||||
Windows users deploy to Kubernetes too! Now you can use Argo CD CLI on Linux, Mac OS, and Windows. The Windows compatible binary is available
|
||||
in the release details page as well as on the Argo CD Help page.
|
||||
|
||||
#### Breaking Changes
|
||||
|
||||
The `argocd_app_sync_status`, `argocd_app_health_status` and `argocd_app_created_time` prometheus metrics are deprecated in favor of additional labels
|
||||
to `argocd_app_info` metric. The deprecated labels are still available can be re-enabled using `ARGOCD_LEGACY_CONTROLLER_METRICS=true` environment variable.
|
||||
The legacy example Grafana dashboard is available at [examples/dashboard-legacy.json](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard-legacy.json).
|
||||
|
||||
#### Known issues
|
||||
Last-minute bugs that will be addressed in 1.5.1 shortly:
|
||||
|
||||
* https://github.com/argoproj/argo-cd/issues/3336
|
||||
* https://github.com/argoproj/argo-cd/issues/3319
|
||||
* https://github.com/argoproj/argo-cd/issues/3339
|
||||
* https://github.com/argoproj/argo-cd/issues/3358
|
||||
|
||||
#### Enhancements
|
||||
* feat: support helm3 (#2383) (#3178)
|
||||
* feat: Argo CD Service Account / Local Users #3185
|
||||
* feat: Disable Admin Login (fixes #3019) (#3179)
|
||||
* feat(ui): add docs to sync policy options present in create application panel (Close #3098) (#3203)
|
||||
* feat: add "service-account" flag to "cluster add" command (#3183) (#3184)
|
||||
* feat: Supports the validate-false option at an app level. Closes #1063 (#2542)
|
||||
* feat: add dest cluster and namespace in the Events (#3093)
|
||||
* feat: Rollback disables auto sync issue #2441 (#2591)
|
||||
* feat: allow ssh and http repository references in bitbucketserver webhook #2773 (#3036)
|
||||
* feat: Add helm --set-file support (#2751)
|
||||
* feat: Include resource group for Event's InvolvedObject.APIVersion
|
||||
* feat: Add argocd cmd for Windows #2121 (#3015)
|
||||
|
||||
#### Bug Fixes
|
||||
|
||||
- fix: app reconciliation fails with panic: index out of (#3233)
|
||||
- fix: upgrade argoproj/pkg version to fix leaked sensitive information in logs (#3230)
|
||||
- fix: set MaxCallSendMsgSize to MaxGRPCMessageSize for the GRPC caller (#3117)
|
||||
- fix: stop caching helm index (#3193)
|
||||
- fix: dex proxy should forward request to dex preserving the basehref (#3165)
|
||||
- fix: set default login redirect to baseHRef (#3164)
|
||||
- fix: don't double-prepend basehref to redirect URLs (fixes #3137)
|
||||
- fix: ui referring to /api/version using absolute path (#3092)
|
||||
- fix: Unhang UI on long app info items by using more sane URL match pattern (#3159)
|
||||
- fix: Allow multiple hostnames per SSH known hosts entry and also allow IPv6 (#2814) (#3074)
|
||||
- fix: argocd-util backup produced truncated backups. import app status (#3096)
|
||||
- fix: upgrade redis-ha chart and enable haproxy (#3147)
|
||||
- fix: make dex server deployment init container resilient to restarts (#3136)
|
||||
- fix: reduct secret values of manifests stored in git (#3088)
|
||||
- fix: labels not being deleted via UI (#3081)
|
||||
- fix: HTTP|HTTPS|NO_PROXY env variable reading #3055 (#3063)
|
||||
- fix: Correct usage text for repo add command regarding insecure repos (#3068)
|
||||
- fix: Ensure SSH private key is written out with a final newline character (#2890) (#3064)
|
||||
- fix: Handle SSH URLs in 'git@server:org/repo' notation correctly (#3062)
|
||||
- fix sso condition when several sso connectors has been configured (#3057)
|
||||
- fix: Fix bug where the same pointer is used. (#3059)
|
||||
- fix: Opening in new tab bad key binding on Linux (#3020)
|
||||
- fix: K8s secrets for repository credential templates are not deleted when credential template is deleted (#3028)
|
||||
- fix: SSH credential template not working #3016
|
||||
- fix: Unable to parse kubectl pre-release version strings (#3034)
|
||||
- fix: Jsonnet TLA parameters of same type are overwritten (#3022)
|
||||
- fix: Replace aws-iam-authenticator to support IRSA (#3010)
|
||||
- fix: Hide bindPW in dex config (#3025)
|
||||
- fix: SSH repo URL with a user different from `git` is not matched correctly when resolving a webhook (#2988)
|
||||
- fix: JWT invalid => Password for superuser has changed since token issued (#2108)
|
||||
|
||||
#### Contributors
|
||||
* alexandrfox
|
||||
* alexec
|
||||
* alexmt
|
||||
* bergur88
|
||||
* CBytelabs
|
||||
* dbeal-wiser
|
||||
* dnascimento
|
||||
* Elgarni
|
||||
* eSamS
|
||||
* gpaul
|
||||
* jannfis
|
||||
* jdmulloy
|
||||
* machgo
|
||||
* masa213f
|
||||
* matthyx
|
||||
* rayanebel
|
||||
* shelby-moore
|
||||
* tomcruise81
|
||||
* wecger
|
||||
* zeph
|
||||
|
||||
## v1.4.2 (2020-01-24)
|
||||
|
||||
- fix: correctly replace cache in namespace isolation mode (#3023)
|
||||
|
||||
## v1.4.1 (2020-01-23)
|
||||
|
||||
- fix: impossible to config RBAC if group name includes ',' (#3013)
|
||||
|
||||
## v1.4.0 (2020-01-17)
|
||||
|
||||
The v1.4.0 is a stability release that brings multiple bug fixes, security, performance enhancements, and multiple usability improvements.
|
||||
|
||||
#### New Features
|
||||
|
||||
#### Security
|
||||
A number of security enhancements and features have been implemented (thanks to [@jannfis](https://github.com/jannfis) for driving it! ):
|
||||
* **Repository Credential Templates Management UI/CLI**. Now you can use Argo CD CLI or UI to configure
|
||||
[credentials template](https://argoproj.github.io/argo-cd/user-guide/private-repositories/#credential-templates) for multiple repositories!
|
||||
* **X-Frame-Options header on serving static assets**. The X-Frame-Options prevents third party sites to trick users into interacting with the application.
|
||||
* **Tighten AppProject RBAC enforcement**. We've improved the enforcement of access rules specified in the
|
||||
[application project](https://argoproj.github.io/argo-cd/operator-manual/declarative-setup/#projects) configuration.
|
||||
|
||||
#### Namespace Isolation
|
||||
With the namespace isolation feature, you no longer have to give full read-only cluster access to Argo CD. Instead, you can give access only to selected namespaces within
|
||||
the cluster:
|
||||
|
||||
```bash
|
||||
argocd cluster add <mycluster> --namespace <mynamespace1> --namespace <mynamespace2>
|
||||
```
|
||||
|
||||
This feature is useful if you don't have full cluster access but still want to use Argo CD to manage some cluster namespaces. The feature also improves performance if Argo CD is
|
||||
used to manage a few namespaces of a large cluster.
|
||||
|
||||
#### Reconciliation Performance
|
||||
Argo CD no longer forks/execs `kubectl` to apply resource changes in the target cluster or to convert resource manifests to the required manifest version. This reduces
|
||||
CPU and Memory usage of large Argo CD instances.
|
||||
|
||||
#### Resources Health based Hook Status
|
||||
The existing Argo CD [resource hooks](https://argoproj.github.io/argo-cd/user-guide/resource_hooks/) feature allows running custom logic during the syncing process. You can mark
|
||||
any Kubernetes resource as a hook, and Argo CD assesses hook status if the resource is a `Pod`, `Job` or `Argo Workflow`. In the v1.4.0 release Argo CD is going to leverage resource
|
||||
[health assessment](https://argoproj.github.io/argo-cd/operator-manual/health/) to get sync hook status. This allows using any custom CRD as a sync hook and leverage custom health
|
||||
check logic.
|
||||
|
||||
#### Manifest Generation
|
||||
* **Track Helm Charts By Semantic Version**. You've been able to track charts hosted in Git repositories using branches to tags. This is now possible for Helm charts. You no longer
|
||||
need to choose the exact version, such as v1.4.0; instead you can use a semantic version constraint such as v1.4.* and the latest version that matches will be installed.
|
||||
* **Build Environment Variables**. Feature allows config management tool to get access to app details during manifest generation via
|
||||
[environment variables](https://argoproj.github.io/argo-cd/user-guide/build-environment/).
|
||||
* **Git submodules**. Argo CD is going to automatically fetch sub-modules if your repository has `.gitmodules` directory.
|
||||
|
||||
#### UI and CLI
|
||||
* **Improved Resource Tree View**. The Application details page got even prettier. The resource view was tuned to fit more resources into the screen, include more information about
|
||||
each resource and don't lose usability at the same time.
|
||||
* **New Account Management CLI Command**. The CLI allows to check which actions are allowed for your account: `argocd account can-i sync applications '*'`
|
||||
|
||||
#### Maintenance Tools
|
||||
The team put more effort into building tools that help to maintain Argo CD itself:
|
||||
* **Bulk Project Editing**. The `argocd-util` allows to add and remove permissions defined in multiple project roles using one command.
|
||||
* **More Prometheus Metrics**. A set of additional metrics that contains useful information managed clusters is exposed by application controller.
|
||||
|
||||
More documentation and tools are coming in patch releases.
|
||||
|
||||
#### Breaking Changes
|
||||
|
||||
Argo CD deletes all **in-flight** hooks if you terminate a running sync operation. The hook state assessment change implemented in this release enables Argo CD to detect
|
||||
an in-flight state for all Kubernetes resources including `Deployment`, `PVC`, `StatefulSet`, `ReplicaSet` etc. So if you terminate the sync operation that has, for example,
|
||||
`StatefulSet` hook that is `Progressing` it will be deleted. The long-running jobs are not supposed to be used as a sync hook and you should consider using
|
||||
[Sync Waves](https://argoproj.github.io/argo-cd/user-guide/sync-waves/) instead.
|
||||
|
||||
#### Enhancements
|
||||
* feat: Add custom healthchecks for cert-manager v0.11.0 (#2689)
|
||||
* feat: add git submodule support (#2495)
|
||||
* feat: Add repository credential management API and CLI (addresses #2136) (#2207)
|
||||
* feat: add support for --additional-headers cli flag (#2467)
|
||||
* feat: Add support for ssh-with-port repo url (#2866) (#2948)
|
||||
* feat: Add Time to ApplicationCondition. (#2417)
|
||||
* feat: Adds `argocd auth can-i` command. Close #2255
|
||||
* feat: Adds revision history limit. Closes #2790 (#2818)
|
||||
* feat: Adds support for ARGO_CD_[TARGET_REVISION|REVISION] and pass to Custom Tool/Helm/Jsonnet
|
||||
* feat: Adds support for Helm charts to be a semver range. Closes #2552 (#2606)
|
||||
* feat: Adds tracing to key external invocations. (#2811)
|
||||
* feat: argocd-util should allow editing project policies in bulk (#2615)
|
||||
* feat: Displays controllerrevsion's revision in the UI. Closes #2306 (#2702)
|
||||
* feat: Issue #2559 - Add gauge Prometheus metric which represents the number of pending manifest requests. (#2658)
|
||||
* feat: Make ConvertToVersion maybe 1090% faster on average (#2820)
|
||||
* feat: namespace isolation (#2839)
|
||||
* feat: removes redundant mutex usage in controller cache and adds cluster cache metrics (#2898)
|
||||
* feat: Set X-Frame-Options on serving static assets (#2706) (#2711)
|
||||
* feat: Simplify using Argo CD without users/SSO/UI (#2688)
|
||||
* feat: Template Out Data Source in Grafana Dashboard (#2859)
|
||||
* feat: Updates UI icons. Closes #2625 and #2757 (#2653)
|
||||
* feat: use editor arguments in InteractiveEditor (#2833)
|
||||
* feat: Use kubectl apply library instead of forking binary (#2861)
|
||||
* feat: use resource health for hook status evaluation (#2938)
|
||||
|
||||
#### Bug Fixes
|
||||
|
||||
- fix: Adds support for /api/v1/account* via HTTP. Fixes #2664 (#2701)
|
||||
- fix: Allow '@'-character in SSH usernames when connecting a repository (#2612)
|
||||
- fix: Allow dot in project policy. Closes #2724 (#2755)
|
||||
- fix: Allow you to sync local Helm apps. Fixes #2741 (#2747)
|
||||
- fix: Allows Helm parameters that contains arrays or maps. (#2525)
|
||||
- fix: application-controller doesn't deal with rm/add same cluster gracefully (x509 unknown) (#2389)
|
||||
- fix: diff local ignore kustomize build options (#2942)
|
||||
- fix: Ensures that Helm charts are correctly resolved before sync. Fixes #2758 (#2760)
|
||||
- fix: Fix 'Open application' link when using basehref (#2729)
|
||||
- fix: fix a bug with cluster add when token secret is not first in list. (#2744)
|
||||
- fix: fix bug where manifests are not cached. Fixes #2770 (#2771)
|
||||
- fix: Fixes bug whereby retry does not work for CLI. Fixes #2767 (#2768)
|
||||
- fix: git contention leads applications into Unknown state (#2877)
|
||||
- fix: Issue #1944 - Gracefully handle missing cached app state (#2464)
|
||||
- fix: Issue #2668 - Delete a specified context (#2669)
|
||||
- fix: Issue #2683 - Make sure app update don't fail due to concurrent modification (#2852)
|
||||
- fix: Issue #2721 Optimize helm repo querying (#2816)
|
||||
- fix: Issue #2853 - Improve application env variables/labels editing (#2856)
|
||||
- fix: Issue 2848 - Application Deployment history panel shows incorrect info for recent releases (#2849)
|
||||
- fix: Make BeforeHookCreation the default. Fixes #2754 (#2759)
|
||||
- fix: No error on `argocd app create` in CLI if `--revision` is omitted #2665
|
||||
- fix: Only delete resources during app delete cascade if permitted to (fixes #2693) (#2695)
|
||||
- fix: prevent user from seeing/deleting resources not permitted in project (#2908) (#2910)
|
||||
- fix: self-heal should retry syncing an application after specified delay
|
||||
- fix: stop logging dex config secrets #(2904) (#2937)
|
||||
- fix: stop using jsondiffpatch on clientside to render resource difference (#2869)
|
||||
- fix: Target Revision truncated #2736
|
||||
- fix: UI should re-trigger SSO login if SSO JWT token expires (#2891)
|
||||
- fix: update argocd-util import was not working properly (#2939)
|
||||
|
||||
#### Contributors
|
||||
|
||||
* Aalok Ahluwalia
|
||||
* Aananth K
|
||||
* Abhishek Jaisingh
|
||||
* Adam Johnson
|
||||
* Alan Tang
|
||||
* Alex Collins
|
||||
* Alexander Matyushentsev
|
||||
* Andrew Waters
|
||||
* Byungjin Park
|
||||
* Christine Banek
|
||||
* Daniel Helfand
|
||||
* David Hong
|
||||
* David J. M. Karlsen
|
||||
* David Maciel
|
||||
* Devan Goodwin
|
||||
* Devin Stein
|
||||
* dthomson25
|
||||
* Gene Liverman
|
||||
* Gregor Krmelj
|
||||
* Guido Maria Serra
|
||||
* Ilir Bekteshi
|
||||
* Imran Ismail
|
||||
* INOUE BANJI
|
||||
* Isaac Gaskin
|
||||
* jannfis
|
||||
* Jeff Hastings
|
||||
* Jesse Suen
|
||||
* John Girvan
|
||||
* Konstantin
|
||||
* Lev Aminov
|
||||
* Manatsawin Hanmongkolchai
|
||||
* Marco Schmid
|
||||
* Masayuki Ishii
|
||||
* Michael Bridgen
|
||||
* Naoki Oketani
|
||||
* niqdev
|
||||
* nitinpatil1992
|
||||
* Olivier Boukili
|
||||
* Olivier Lemasle
|
||||
* Omer Kahani
|
||||
* Paul Brit
|
||||
* Qingbo Zhou
|
||||
* Saradhi Sreegiriraju
|
||||
* Scott Cabrinha
|
||||
* shlo
|
||||
* Simon Behar
|
||||
* stgarf
|
||||
* Yujun Zhang
|
||||
* Zoltán Reegn
|
||||
|
||||
## v1.3.4 (2019-12-05)
|
||||
- #2819 Fixes logging of tracing option in CLI
|
||||
|
||||
## v1.3.3 (2019-12-05)
|
||||
- #2721 High CPU utilisation (5 cores) and spammy logs
|
||||
|
||||
## v1.3.2 (2019-12-03)
|
||||
- #2797 Fix directory traversal edge case and enhance tests
|
||||
|
||||
## v1.3.1 (2019-12-02)
|
||||
- #2664 update account password from API resulted 404
|
||||
- #2724 Can't use `DNS-1123` compliant app name when creating project role
|
||||
- #2726 App list does not show chart for Helm app
|
||||
- #2741 argocd local sync cannot parse kubernetes version
|
||||
- #2754 BeforeHookCreation should be the default hook
|
||||
- #2767 Fix bug whereby retry does not work for CLI
|
||||
- #2770 Always cache miss for manifests
|
||||
- #1345 argocd-application-controller: can not retrieve list of objects using index : Index with name namespace does not exist
|
||||
|
||||
## v1.3.0 (2019-11-13)
|
||||
|
||||
#### New Features
|
||||
|
||||
@@ -22,7 +432,9 @@ https://youtu.be/GP7xtrnNznw
|
||||
|
||||
##### Orphan Resources
|
||||
|
||||
Some users would like to make sure that resources in a namespace are managed only by Argo CD. So we've introduced the concept of an "orphan resource" - any resource that is in namespace associated with an app, but not managed by Argo CD. This is enable in the project settings. Once enabled, Argo CD will show in the app view any resources in the app's namepspace that is not mananged by Argo CD.
|
||||
Some users would like to make sure that resources in a namespace are managed only by Argo CD. So we've introduced the concept of an "orphan resource" - any resource that is in a namespace associated with an app, but not managed by Argo CD. This is enabled in the project settings. Once enabled, Argo CD will show in the app view any resources in the app's namespace that are not managed by Argo CD.
|
||||
|
||||
https://youtu.be/9ZoTevVQf5I
|
||||
|
||||
##### Sync Windows
|
||||
|
||||
@@ -30,137 +442,110 @@ There may be instances when you want to control the times during which an Argo C
|
||||
|
||||
#### Enhancements
|
||||
|
||||
* Issue #2396 argocd list command should have filter options like by pr… (#2421)
|
||||
* Adds support for Helm 1st-class. Closes #1145 (#1865)
|
||||
* Issue #1167 - Implement orphan resources support (#2103)
|
||||
* Helm hooks. Closes #355 (#2069)
|
||||
* Adds support for a literal YAML block of Helm values. Closes #1930 (#2057)
|
||||
* Adds support for hook-delete-policy: BeforeHookCreation. Closes #2036 (#2048)
|
||||
* Adds support for setting Helm string parameters via CLI. Closes #2078 (#2109)
|
||||
* [UI] Add application labels to Applications list and Applications details page (#1099)
|
||||
* Helm repository as first class Argo CD Application source (#1145)
|
||||
* Ability to generate a warn/alert when a namespace deviates from the expected state (#1167)
|
||||
* Improve diff support for resource requests/limits (#1615)
|
||||
* HTTP API should allow JWT to be passed via Authorization header (#1642)
|
||||
* Ability to create & upsert projects from spec (#1852)
|
||||
* Support for in-line block from helm chart values (#1930)
|
||||
* Request OIDC groups claim if groups scope is not supported (#1956)
|
||||
* Add a maintenance window for Applications with automated syncing (#1995)
|
||||
* Support `argocd.argoproj.io/hook-delete-policy: BeforeHookCreation` (#2036)
|
||||
* Support setting Helm string parameters using CLI/UI (#2078)
|
||||
* Config management plugin environment variable UI/CLI support (#2203)
|
||||
* Helm: auto-detect URLs (#2260)
|
||||
* Helm: UI improvements (#2261)
|
||||
* Support `helm template --kube-version ` (#2275)
|
||||
* Use community icons for resources (#2277)
|
||||
* Make `group` optional for `ignoreDifferences` config (#2298)
|
||||
* Update Helm docs (#2315)
|
||||
* Add cluster information into Splunk (#2354)
|
||||
* argocd list command should have filter options like by project (#2396)
|
||||
* Add target/current revision to status badge (#2445)
|
||||
* Update tooling to use Kustomize v3 (#2487)
|
||||
* Update root `Dockerfile` to use the `hack/install.sh` (#2488)
|
||||
* Support and document using HPA for repo-server (#2559)
|
||||
* Upgrade Helm (#2587)
|
||||
* UI fixes for "Sync Apps" panel. (#2604)
|
||||
* Upgrade kustomize from v3.1.0 to v3.2.1 (#2609)
|
||||
* Map helm lifecycle hooks to ArgoCD pre/post/sync hooks (#355)
|
||||
* [UI] Enhance app creation page with Helm parameters overrides (#1059)
|
||||
|
||||
#### Bug Fixes
|
||||
|
||||
- Issue #2484 - Impossible to edit chart name using App details page (#2485)
|
||||
- Issue #2185 - Manual sync don't trigger hooks (#2477)
|
||||
- Issue #2453 - Application controller sometimes accidentally removes duplicated/excluded resource warning condition (#2454)
|
||||
- Issue #1944 - Gracefully handle missing cached app state (#2464)
|
||||
- Issue #2321 - Hook deletion should not fail if error message is not found (#2458)
|
||||
- Issue #2448 - Custom resource actions cannot be executed from the UI (#2449)
|
||||
- Issue #2339 - Make sure controller uses latest git version if app reconciliation result expired (#2346)
|
||||
- Issue #2290 - Fix nil pointer dereference in application controller (#2291)
|
||||
- Issue #2245 - Intermittent "git ls-remote" request failures should not fail app reconciliation (#2281)
|
||||
- Issue #2022 - Support limiting number of concurrent kubectl fork/execs (#2264)
|
||||
- Fix degraded proxy support for http(s) git repository (#2243) (#2249)
|
||||
- Issue #2198 - Print empty string instead of Unknown in 'argocd app sync' output (#2223)
|
||||
- Fix for displaying hooks in app diff view. Fixes #2215 (#2218)
|
||||
- Issue #2212 - Correctly handle trailing slash in configured URL while creating redirect URL (#2214)
|
||||
- Deals with race condition when deleting resource. Closes #2141 (#2200)
|
||||
- Issue #2192 - SyncError app condition disappears during app reconciliation (#2193)
|
||||
- Adds test for updating immutable field, adds UI button to allow force from UI. See #2150 (#2155)
|
||||
- Issue #2174 - Fix git repo url parsing on application list view (#2175)
|
||||
- Issue #2146 - Fix nil pointer dereference error during app reconciliation (#2170)
|
||||
- Issue #2114 - Fix history api fallback implementation to support app names with dots (#2168)
|
||||
- Issue #2060 - Endpoint incorrectly considered top level managed resource (#2129)
|
||||
- Fixed truncation of group in UI. Closes #2006 (#2128)
|
||||
- Allow adding certs for hostnames ending on a dot (fixes #2116) (#2120)
|
||||
- Escape square brackets in pattern matching hostnames (fixes #2099) (#2113)
|
||||
- failed parsing on parameters with comma (#1660)
|
||||
- StatefulSet with OnDelete Update Strategy stuck progressing (#1881)
|
||||
- Warning during secret diffing (#1923)
|
||||
- Error message "Unable to load data: key is missing" is confusing (#1944)
|
||||
- OIDC group bindings are truncated (#2006)
|
||||
- Multiple parallel app syncs causes OOM (#2022)
|
||||
- Unknown error when setting params with argocd app set on helm app (#2046)
|
||||
- Endpoint is no longer shown as a child of services (#2060)
|
||||
- SSH known hosts entry cannot be deleted if contains shell pattern in name (#2099)
|
||||
- Application 404s on names with periods (#2114)
|
||||
- Adding certs for hostnames ending with a dot (.) is not possible (#2116)
|
||||
- Fix `TestHookDeleteBeforeCreation` (#2141)
|
||||
- v1.2.0-rc1 nil pointer dereference when syncing (#2146)
|
||||
- Replacing services failure (#2150)
|
||||
- 1.2.0-rc1 - Authentication Required error in Repo Server (#2152)
|
||||
- v1.2.0-rc1 Applications List View doesn't work (#2174)
|
||||
- Manual sync does not trigger Presync hooks (#2185)
|
||||
- SyncError app condition disappears during app reconciliation (#2192)
|
||||
- argocd app wait\sync prints 'Unknown' for resources without health (#2198)
|
||||
- 1.2.0-rc2 Warning during secret diffing (#2206)
|
||||
- SSO redirect url is incorrect if configured Argo CD URL has trailing slash (#2212)
|
||||
- Application summary diff page shows hooks (#2215)
|
||||
- An app with a single resource and Sync hook remains progressing (#2216)
|
||||
- CONTRIBUTING documentation outdated (#2231)
|
||||
- v1.2.0-rc2 does not retrieve http(s) based git repository behind the proxy (#2243)
|
||||
- Intermittent "git ls-remote" request failures should not fail app reconciliation (#2245)
|
||||
- Result of ListApps operation for Git repo is cached incorrectly (#2263)
|
||||
- ListApps does not utilize cache (#2287)
|
||||
- Controller panics due to nil pointer error (#2290)
|
||||
- The Helm --kube-version support does not work on GKE: (#2303)
|
||||
- Fixes bug that prevents you creating repos via UI/CLI. (#2308)
|
||||
- The 'helm.repositories' settings is dropped without migration path (#2316)
|
||||
- Badge response does not contain cache control header (#2317)
|
||||
- Inconsistent sync result from UI and CLI (#2321)
|
||||
- Failed edit application with plugin type requiring environment (#2330)
|
||||
- AutoSync doesn't work anymore (#2339)
|
||||
- End-to-End tests not working with Kubernetes v1.16 (#2371)
|
||||
- Creating an application from Helm repository should select "Helm" as source type (#2378)
|
||||
- The parameters of ValidateAccess GRPC method should not be logged (#2386)
|
||||
- Maintenance window meaning is confusing (#2398)
|
||||
- UI bug when targetRevision is omitted (#2407)
|
||||
- Too many vulnerabilities in Docker image (#2425)
|
||||
- proj windows commands not consistent with other commands (#2443)
|
||||
- Custom resource actions cannot be executed from the UI (#2448)
|
||||
- Application controller sometimes accidentally removes duplicated/excluded resource warning condition (#2453)
|
||||
- Logic that checks sync windows state in the cli is incorrect (#2455)
|
||||
- UI don't allow to create window with `* * * * *` schedule (#2475)
|
||||
- Helm Hook is executed twice if annotated with both pre-install and pre-upgrade annotations (#2480)
|
||||
- Impossible to edit chart name using App details page (#2484)
|
||||
- ArgoCD does not provide CSRF protection (#2496)
|
||||
- ArgoCD failing to install CRDs in master from Helm Charts (#2497)
|
||||
- Timestamp in Helm package file name causes error in Application with Helm source (#2549)
|
||||
- Attempting to create a repo with password but not username panics (#2567)
|
||||
- UI incorrectly mark resources as `Required Pruning` (#2577)
|
||||
- argocd app diff prints only first difference (#2616)
|
||||
- Bump min client cache version (#2619)
|
||||
- Cluster list page fails if any cluster is not reachable (#2620)
|
||||
- Repository type should be mandatory for repo add command in CLI (#2622)
|
||||
- Repo server executes unnecessary ls-remotes (#2626)
|
||||
- Application list page incorrectly filter apps by label selector (#2633)
|
||||
- Custom actions are disabled in Argo CD UI (#2635)
|
||||
- Failure of `argocd version` in the self-building container image (#2645)
|
||||
- Application list page is not updated automatically anymore (#2655)
|
||||
- Login regression issues (#2659)
|
||||
- Regression: Cannot return Kustomize version for 3.1.0 (#2662)
|
||||
- API server does not allow creating role with action `action/*` (#2670)
|
||||
- Application controller `kubectl-parallelism-limit` flag is broken (#2673)
|
||||
- Annoying toolbar flickering (#2691)
|
||||
|
||||
#### Other
|
||||
## v1.2.5 (2019-10-29)
|
||||
|
||||
- Fix possible path traversal attack when supporting Helm `values.yaml` (#2452)
|
||||
- Fix UI crash on application list page (#2490)
|
||||
- add support for --additional-headers cli flag (#2467)
|
||||
- Allow collapse/expand helm values text (#2469)
|
||||
- Update base image to Debian buster (#2431)
|
||||
- Error with new `actions run` suggestion (#2434)
|
||||
- Detach ArgoCD from specific workflow API (#2428)
|
||||
- Add application labels to Applications list and Applications details page (#2430)
|
||||
- Fix JS error on application creation page if no plugins configured (#2432)
|
||||
- Add missing externalURL for networking.k8s.io Ingress type (#2390)
|
||||
- App status panel shows metadata of current revision in git instead of most recent reconciled revision (#2419)
|
||||
- Adds support for plugin params. (#2406)
|
||||
- Granular RBAC Support for actions (#2110)
|
||||
- Added Kustomize, Helm, and Kubectl to `argocd version` (#2329)
|
||||
- Stop unnecessary re-loading clusters on every app list page re-render (#2411)
|
||||
- Add project level maintenance windows for applications (#2380)
|
||||
- Make argo-cd docker images openshift friendly (#2362)
|
||||
- Add dest-server and dest-namespace field to reconciliation logs (#2388)
|
||||
- Add custom action example to argocd-cm.yaml (#2375)
|
||||
- Try out community icons. (#2349)
|
||||
- Make `group` optional for `ignoreDifferences` setting (#2335)
|
||||
- Adds support for Github Enterprise URLs (#2344)
|
||||
- Add argocd project as variable to grafana dashboard (#2336)
|
||||
- Fix missing envs when updating application of content management plugin type (#2331)
|
||||
- util/localconfig: prefer HOME env var over os/user (#2326)
|
||||
- Auto-detect Helm repos + support Helm basic auth + fix bugs (#2309)
|
||||
- Add cache-control HTTP header to badge response (#2328)
|
||||
- Document flags/env variables useful for performance tuning (#2312)
|
||||
- Re-enable caching when listing apps. (#2295)
|
||||
- Fixes bug in `argocd repo list` and tidy up UI (#2307)
|
||||
- Add restart action to Deployment/StatefulSet/DaemonSet (#2300)
|
||||
- Clean-up the kube-version from Helm so that we can support GKE. (#2304)
|
||||
- Fixes issue diffing secrets (#2271)
|
||||
- Add --self-heal flag to argocd cli (#2296)
|
||||
- Support --kube-version. (#2276)
|
||||
- Fix building error when following CONTRIBUTING.md (#2278)
|
||||
- Adding information to make local execution more accessible (#2279)
|
||||
- API clients may use the HTTP Authorization header for auth. (#2262)
|
||||
- Fix TestAutoSyncSelfHealEnabled test flakiness (#2282)
|
||||
- Change Helm repo URLs to argoproj/argo-cd/master (#2266)
|
||||
- Fix/grafana datasources (#2229)
|
||||
- If there is only one wave and no pre/post hooks, we should be synced.… (#2217)
|
||||
- Create projects from manifests (#2202)
|
||||
- Fix JS crash in EditablePanel component (#2222)
|
||||
- Use same /24 network for testing immutable field update (#2213)
|
||||
- Add path to externalURLs (#2208)
|
||||
- support OIDC claims request (#1957)
|
||||
- Better detection for authorization_code OIDC response type (#2164)
|
||||
- Allow list actions to return yaml or json (#1805)
|
||||
- Adds a floating action button with help and chat links to every page.… (#2125)
|
||||
- Temporary disable Git LFS test to unblock release (#2172)
|
||||
- Determine the manifest version from the VERSION file when on release branch (#2166)
|
||||
- Enhances cookie warning with actual length to help users fix their co… (#2134)
|
||||
- Fixed routing issue for periods (#2162)
|
||||
- Added more health filters in UI (#2160)
|
||||
- Added 'SyncFail' to possible HookTypes in UI (#2153)
|
||||
- Indicate that `SyncFail` hooks are on v1.2+ (#2149)
|
||||
- Adds checks around valid paths for apps (#2133)
|
||||
- Minor CLI bug fixes (#2132)
|
||||
- Adds support for a literal YAML block of Helm values. Closes #1930 (#2057)
|
||||
- Fixed truncation of group in UI. Closes #2006 (#2128)
|
||||
- Redact secrets using "+" rather than "*" as this is base 64 compatiba… (#2119)
|
||||
|
||||
#### Contributors
|
||||
|
||||
* Aalok Ahluwalia <!-- num=1 -->
|
||||
* Adam Johnson <!-- num=4 -->
|
||||
* Alex Collins <!-- num=62 -->
|
||||
* Alexander Matyushentsev <!-- num=58 -->
|
||||
* Andrew Waters <!-- num=2 -->
|
||||
* Ben Doyle <!-- num=1 -->
|
||||
* Chris Jones <!-- num=1 -->
|
||||
* Fred Dubois <!-- num=1 -->
|
||||
* Gregor Krmelj <!-- num=2 -->
|
||||
* Gustav Paul <!-- num=2 -->
|
||||
* Isaac Gaskin <!-- num=1 -->
|
||||
* Jesse Suen <!-- num=1 -->
|
||||
* John Reese <!-- num=1 -->
|
||||
* Mitz Amano <!-- num=1 -->
|
||||
* Olivier Boukili <!-- num=1 -->
|
||||
* Olivier Lemasle <!-- num=1 -->
|
||||
* Rayyis <!-- num=1 -->
|
||||
* Rodolphe Prin <!-- num=1 -->
|
||||
* Ryota <!-- num=2 -->
|
||||
* Seiya Muramatsu <!-- num=1 -->
|
||||
* Simon Behar <!-- num=11 -->
|
||||
* Sverre Boschman <!-- num=1 -->
|
||||
* Tom Wieczorek <!-- num=3 -->
|
||||
* Yujun Zhang <!-- num=4 -->
|
||||
* Zoltán Reegn <!-- num=1 -->
|
||||
* agabet <!-- num=1 -->
|
||||
* dthomson25 <!-- num=2 -->
|
||||
* jannfis <!-- num=8 -->
|
||||
* ssbtn <!-- num=2 -->
|
||||
- Issue #2339 - Don't update `status.reconciledAt` unless compared with latest git version (#2581)
|
||||
|
||||
## v1.2.4 (2019-10-23)
|
||||
|
||||
|
||||
15
Dockerfile
15
Dockerfile
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=debian:10-slim
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM golang:1.12.6 as builder
|
||||
FROM golang:1.14.1 as builder
|
||||
|
||||
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
|
||||
@@ -30,9 +30,9 @@ RUN ./install.sh dep-linux
|
||||
RUN ./install.sh packr-linux
|
||||
RUN ./install.sh kubectl-linux
|
||||
RUN ./install.sh ksonnet-linux
|
||||
RUN ./install.sh helm2-linux
|
||||
RUN ./install.sh helm-linux
|
||||
RUN ./install.sh kustomize-linux
|
||||
RUN ./install.sh aws-iam-authenticator-linux
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD Base - used as the base for both the release and dev argocd images
|
||||
@@ -50,16 +50,17 @@ RUN groupadd -g 999 argocd && \
|
||||
chmod g=u /home/argocd && \
|
||||
chmod g=u /etc/passwd && \
|
||||
apt-get update && \
|
||||
apt-get install -y git git-lfs && \
|
||||
apt-get install -y git git-lfs python3-pip && \
|
||||
apt-get clean && \
|
||||
pip3 install awscli==1.18.80 && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
COPY hack/git-ask-pass.sh /usr/local/bin/git-ask-pass.sh
|
||||
COPY --from=builder /usr/local/bin/ks /usr/local/bin/ks
|
||||
COPY --from=builder /usr/local/bin/helm2 /usr/local/bin/helm2
|
||||
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
|
||||
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl
|
||||
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
|
||||
COPY --from=builder /usr/local/bin/aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
|
||||
# script to add current (possibly arbitrary) user to /etc/passwd at runtime
|
||||
# (if it's not already there, to be openshift friendly)
|
||||
COPY uid_entrypoint.sh /usr/local/bin/uid_entrypoint.sh
|
||||
@@ -96,7 +97,7 @@ RUN NODE_ENV='production' yarn build
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM golang:1.12.6 as argocd-build
|
||||
FROM golang:1.14.1 as argocd-build
|
||||
|
||||
COPY --from=builder /usr/local/bin/dep /usr/local/bin/dep
|
||||
COPY --from=builder /usr/local/bin/packr /usr/local/bin/packr
|
||||
@@ -115,7 +116,8 @@ RUN cd ${GOPATH}/src/dummy && \
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
COPY . .
|
||||
RUN make cli server controller repo-server argocd-util && \
|
||||
make CLI_NAME=argocd-darwin-amd64 GOOS=darwin cli
|
||||
make CLI_NAME=argocd-darwin-amd64 GOOS=darwin cli && \
|
||||
make CLI_NAME=argocd-windows-amd64.exe GOOS=windows cli
|
||||
|
||||
|
||||
####################################################################################################
|
||||
@@ -124,4 +126,3 @@ RUN make cli server controller repo-server argocd-util && \
|
||||
FROM argocd-base
|
||||
COPY --from=argocd-build /go/src/github.com/argoproj/argo-cd/dist/argocd* /usr/local/bin/
|
||||
COPY --from=argocd-ui ./src/dist/app /shared/app
|
||||
|
||||
|
||||
78
Gopkg.lock
generated
78
Gopkg.lock
generated
@@ -66,16 +66,38 @@
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:52905b00a73cda93a2ce8c5fa35185daed673d59e39576e81ad6ab6fb7076b3c"
|
||||
digest = "1:3cce78d5d0090e3f1162945fba60ba74e72e8422e8e41bb9c701afb67237bb65"
|
||||
name = "github.com/alicebob/gopher-json"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5a6b3ba71ee69b77cf64febf8b5a7526ca5eaef0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:18a07506ddaa87b1612bfd69eef03f510faf122398df3da774d46dcfe751a060"
|
||||
name = "github.com/alicebob/miniredis"
|
||||
packages = [
|
||||
".",
|
||||
"server",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "3d7aa1333af56ab862d446678d93aaa6803e0938"
|
||||
version = "v2.7.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fbe4779f247babd0b21e2283d419f848a8dab42c0f6bbdeb88d83f190f52c159"
|
||||
name = "github.com/argoproj/pkg"
|
||||
packages = [
|
||||
"errors",
|
||||
"exec",
|
||||
"grpc/http",
|
||||
"kubeclientmetrics",
|
||||
"rand",
|
||||
"stats",
|
||||
"time",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "02a6aac40ac4cd23de448afe7a1ec0ba4b6d2b96"
|
||||
revision = "f46beff7cd548bafc0264b95a4efed645fb1862c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d8a2bb36a048d1571bcc1aee208b61f39dc16c6c53823feffd37449dde162507"
|
||||
@@ -93,6 +115,14 @@
|
||||
pruneopts = ""
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6e56c4e58a4359f844880c50b3c0a154c73c51c9399c2d16fa79221a6a110598"
|
||||
name = "github.com/bsm/redislock"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "3d76f17a9f1e22d11521f78267ce1795641ae596"
|
||||
version = "v0.4.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6e2b0748ea11cffebe87b4a671a44ecfb243141cdd5df54cb44b7e8e93cb7ea3"
|
||||
name = "github.com/casbin/casbin"
|
||||
@@ -466,6 +496,17 @@
|
||||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:dcf8316121302735c0ac84e05f4686e3b34e284444435e9a206da48d8be18cb1"
|
||||
name = "github.com/gomodule/redigo"
|
||||
packages = [
|
||||
"internal",
|
||||
"redis",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "9c11da706d9b7902c6da69c592f75637793fe121"
|
||||
version = "v2.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6"
|
||||
name = "github.com/google/btree"
|
||||
@@ -489,15 +530,19 @@
|
||||
version = "v0.3.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:14d826ee25139b4674e9768ac287a135f4e7c14e1134a5b15e4e152edfd49f41"
|
||||
digest = "1:ce886a0d7c4737485b1d72453aa6f22282c021fffe209abe33220d54d0e2e670"
|
||||
name = "github.com/google/go-jsonnet"
|
||||
packages = [
|
||||
".",
|
||||
"ast",
|
||||
"parser",
|
||||
"astgen",
|
||||
"internal/errors",
|
||||
"internal/parser",
|
||||
"internal/program",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "dfddf2b4e3aec377b0dcdf247ff92e7d078b8179"
|
||||
revision = "70a6b3d419d9ee16a144345c35e0305052c6f2d9"
|
||||
version = "v0.15.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -515,6 +560,14 @@
|
||||
pruneopts = ""
|
||||
revision = "c34317bd91bf98fab745d77b03933cf8769299fe"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ad92aa49f34cbc3546063c7eb2cabb55ee2278b72842eda80e2a20a8a06a8d73"
|
||||
name = "github.com/google/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2a131706ff80636629ab6373f2944569b8252ecc018cda8040931b05d32e3c16"
|
||||
name = "github.com/googleapis/gnostic"
|
||||
@@ -950,15 +1003,15 @@
|
||||
version = "v0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
|
||||
digest = "1:cc4eb6813da8d08694e557fcafae8fcc24f47f61a0717f952da130ca9a486dfc"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"mock",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
revision = "3ebf1ddaeb260c4b1ae502a01c7844fa8c1fa0e9"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:51cf0fca93f4866709ceaf01b750e51d997c299a7bd2edf7ccd79e3b428754ae"
|
||||
@@ -1928,12 +1981,16 @@
|
||||
"bou.ke/monkey",
|
||||
"github.com/Masterminds/semver",
|
||||
"github.com/TomOnTime/utfutil",
|
||||
"github.com/alicebob/miniredis",
|
||||
"github.com/argoproj/pkg/errors",
|
||||
"github.com/argoproj/pkg/exec",
|
||||
"github.com/argoproj/pkg/grpc/http",
|
||||
"github.com/argoproj/pkg/kubeclientmetrics",
|
||||
"github.com/argoproj/pkg/stats",
|
||||
"github.com/argoproj/pkg/time",
|
||||
"github.com/bsm/redislock",
|
||||
"github.com/casbin/casbin",
|
||||
"github.com/casbin/casbin/model",
|
||||
"github.com/casbin/casbin/persist",
|
||||
"github.com/coreos/go-oidc",
|
||||
"github.com/dgrijalva/jwt-go",
|
||||
"github.com/dustin/go-humanize",
|
||||
@@ -1957,6 +2014,7 @@
|
||||
"github.com/golang/protobuf/ptypes/empty",
|
||||
"github.com/google/go-jsonnet",
|
||||
"github.com/google/shlex",
|
||||
"github.com/google/uuid",
|
||||
"github.com/grpc-ecosystem/go-grpc-middleware",
|
||||
"github.com/grpc-ecosystem/go-grpc-middleware/auth",
|
||||
"github.com/grpc-ecosystem/go-grpc-middleware/logging",
|
||||
@@ -2039,6 +2097,7 @@
|
||||
"k8s.io/apimachinery/pkg/types",
|
||||
"k8s.io/apimachinery/pkg/util/intstr",
|
||||
"k8s.io/apimachinery/pkg/util/jsonmergepatch",
|
||||
"k8s.io/apimachinery/pkg/util/net",
|
||||
"k8s.io/apimachinery/pkg/util/runtime",
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch",
|
||||
"k8s.io/apimachinery/pkg/util/wait",
|
||||
@@ -2061,6 +2120,7 @@
|
||||
"k8s.io/client-go/tools/cache",
|
||||
"k8s.io/client-go/tools/clientcmd",
|
||||
"k8s.io/client-go/tools/clientcmd/api",
|
||||
"k8s.io/client-go/tools/pager",
|
||||
"k8s.io/client-go/tools/portforward",
|
||||
"k8s.io/client-go/transport/spdy",
|
||||
"k8s.io/client-go/util/flowcontrol",
|
||||
|
||||
15
Gopkg.toml
15
Gopkg.toml
@@ -86,7 +86,7 @@ required = [
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "1.2.2"
|
||||
version = "1.5.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gobuffalo/packr"
|
||||
@@ -115,3 +115,16 @@ required = [
|
||||
[[override]]
|
||||
name = "github.com/evanphx/json-patch"
|
||||
version = "v4.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/google/uuid"
|
||||
version = "1.1.1"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/alicebob/miniredis"
|
||||
version = "2.7.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/bsm/redislock"
|
||||
version = "0.4.3"
|
||||
|
||||
272
Makefile
272
Makefile
@@ -9,10 +9,82 @@ GIT_COMMIT=$(shell git rev-parse HEAD)
|
||||
GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
|
||||
GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
|
||||
PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run vendor/github.com/gobuffalo/packr/packr/main.go"; fi)
|
||||
VOLUME_MOUNT=$(shell [[ $(go env GOOS)=="darwin" ]] && echo ":delegated" || echo "")
|
||||
VOLUME_MOUNT=$(shell if test "$(go env GOOS)"=="darwin"; then echo ":delegated"; elif test selinuxenabled; then echo ":Z"; else echo ""; fi)
|
||||
|
||||
define run-in-dev-tool
|
||||
docker run --rm -it -u $(shell id -u) -e HOME=/home/user -v ${CURRENT_DIR}:/go/src/github.com/argoproj/argo-cd${VOLUME_MOUNT} -w /go/src/github.com/argoproj/argo-cd argocd-dev-tools bash -c "GOPATH=/go $(1)"
|
||||
GOPATH?=$(shell if test -x `which go`; then go env GOPATH; else echo "$(HOME)/go"; fi)
|
||||
GOCACHE?=$(HOME)/.cache/go-build
|
||||
|
||||
DOCKER_SRCDIR?=$(GOPATH)/src
|
||||
DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
|
||||
|
||||
ARGOCD_PROCFILE?=Procfile
|
||||
|
||||
# Configuration for building argocd-test-tools image
|
||||
TEST_TOOLS_NAMESPACE?=argoproj
|
||||
TEST_TOOLS_IMAGE=argocd-test-tools
|
||||
TEST_TOOLS_TAG?=v0.5.0
|
||||
ifdef TEST_TOOLS_NAMESPACE
|
||||
TEST_TOOLS_PREFIX=${TEST_TOOLS_NAMESPACE}/
|
||||
endif
|
||||
|
||||
# You can change the ports where ArgoCD components will be listening on by
|
||||
# setting the appropriate environment variables before running make.
|
||||
ARGOCD_E2E_APISERVER_PORT?=8080
|
||||
ARGOCD_E2E_REPOSERVER_PORT?=8081
|
||||
ARGOCD_E2E_REDIS_PORT?=6379
|
||||
ARGOCD_E2E_DEX_PORT?=5556
|
||||
ARGOCD_E2E_YARN_HOST?=localhost
|
||||
|
||||
ARGOCD_IN_CI?=false
|
||||
ARGOCD_TEST_E2E?=true
|
||||
|
||||
ARGOCD_LINT_GOGC?=20
|
||||
|
||||
# Runs any command in the argocd-test-utils container in server mode
|
||||
# Server mode container will start with uid 0 and drop privileges during runtime
|
||||
define run-in-test-server
|
||||
docker run --rm -it \
|
||||
--name argocd-test-server \
|
||||
-e USER_ID=$(shell id -u) \
|
||||
-e HOME=/home/user \
|
||||
-e GOPATH=/go \
|
||||
-e GOCACHE=/tmp/go-build-cache \
|
||||
-e ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
-e ARGOCD_E2E_TEST=$(ARGOCD_E2E_TEST) \
|
||||
-e ARGOCD_E2E_YARN_HOST=$(ARGOCD_E2E_YARN_HOST) \
|
||||
-v ${DOCKER_SRCDIR}:/go/src${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
|
||||
-p 4000:4000 \
|
||||
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
bash -c "$(1)"
|
||||
endef
|
||||
|
||||
# Runs any command in the argocd-test-utils container in client mode
|
||||
define run-in-test-client
|
||||
docker run --rm -it \
|
||||
--name argocd-test-client \
|
||||
-u $(shell id -u) \
|
||||
-e HOME=/home/user \
|
||||
-e GOPATH=/go \
|
||||
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
|
||||
-e GOCACHE=/tmp/go-build-cache \
|
||||
-e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
|
||||
-v ${DOCKER_SRCDIR}:/go/src${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
$(TEST_TOOLS_NAMESPACE)/$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
bash -c "$(1)"
|
||||
endef
|
||||
|
||||
#
|
||||
define exec-in-test-server
|
||||
docker exec -it -u $(shell id -u) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
|
||||
endef
|
||||
|
||||
PATH:=$(PATH):$(PWD)/hack
|
||||
@@ -38,6 +110,8 @@ endif
|
||||
ifneq (${GIT_TAG},)
|
||||
IMAGE_TAG=${GIT_TAG}
|
||||
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
|
||||
else
|
||||
IMAGE_TAG?=latest
|
||||
endif
|
||||
|
||||
ifeq (${DOCKER_PUSH},true)
|
||||
@@ -53,6 +127,10 @@ endif
|
||||
.PHONY: all
|
||||
all: cli image argocd-util
|
||||
|
||||
.PHONY: gogen
|
||||
gogen:
|
||||
go generate ./util/argo/...
|
||||
|
||||
.PHONY: protogen
|
||||
protogen:
|
||||
./hack/generate-proto.sh
|
||||
@@ -66,21 +144,25 @@ clientgen:
|
||||
./hack/update-codegen.sh
|
||||
|
||||
.PHONY: codegen-local
|
||||
codegen-local: protogen clientgen openapigen manifests-local
|
||||
codegen-local: gogen protogen clientgen openapigen manifests-local
|
||||
|
||||
.PHONY: codegen
|
||||
codegen: dev-tools-image
|
||||
$(call run-in-dev-tool,make codegen-local)
|
||||
codegen:
|
||||
$(call run-in-test-client,make codegen-local)
|
||||
|
||||
.PHONY: cli
|
||||
cli: clean-debug
|
||||
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
|
||||
|
||||
.PHONY: cli-docker
|
||||
go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd/argocd
|
||||
|
||||
.PHONY: release-cli
|
||||
release-cli: clean-debug image
|
||||
docker create --name tmp-argocd-linux $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)
|
||||
docker cp tmp-argocd-linux:/usr/local/bin/argocd ${DIST_DIR}/argocd-linux-amd64
|
||||
docker cp tmp-argocd-linux:/usr/local/bin/argocd-darwin-amd64 ${DIST_DIR}/argocd-darwin-amd64
|
||||
docker cp tmp-argocd-linux:/usr/local/bin/argocd-windows-amd64.exe ${DIST_DIR}/argocd-windows-amd64.exe
|
||||
docker rm tmp-argocd-linux
|
||||
|
||||
.PHONY: argocd-util
|
||||
@@ -88,17 +170,23 @@ argocd-util: clean-debug
|
||||
# Build argocd-util as a statically linked binary, so it could run within the alpine-based dex container (argoproj/argo-cd#844)
|
||||
CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
|
||||
|
||||
.PHONY: dev-tools-image
|
||||
dev-tools-image:
|
||||
cd hack && docker build -t argocd-dev-tools . -f Dockerfile.dev-tools
|
||||
# .PHONY: dev-tools-image
|
||||
# dev-tools-image:
|
||||
# docker build -t $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE) . -f hack/Dockerfile.dev-tools
|
||||
# docker tag $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE) $(DEV_TOOLS_PREFIX)$(DEV_TOOLS_IMAGE):$(DEV_TOOLS_VERSION)
|
||||
|
||||
.PHONY: test-tools-image
|
||||
test-tools-image:
|
||||
docker build -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
|
||||
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
|
||||
|
||||
.PHONY: manifests-local
|
||||
manifests-local:
|
||||
./hack/update-manifests.sh
|
||||
|
||||
.PHONY: manifests
|
||||
manifests: dev-tools-image
|
||||
$(call run-in-dev-tool,make manifests-local IMAGE_TAG='${IMAGE_TAG}')
|
||||
manifests: test-tools-image
|
||||
$(call run-in-test-client,make manifests-local IMAGE_TAG='${IMAGE_TAG}')
|
||||
|
||||
|
||||
# NOTE: we use packr to do the build instead of go, since we embed swagger files and policy.csv
|
||||
@@ -134,6 +222,7 @@ image: packr
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-util ./cmd/argocd-util
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd/argocd
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-darwin-amd64 ./cmd/argocd
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 dist/packr build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-windows-amd64.exe ./cmd/argocd
|
||||
cp Dockerfile.dev dist
|
||||
docker build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
@@ -147,40 +236,121 @@ builder-image:
|
||||
docker build -t $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) --target builder .
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-cd-ci-builder:$(IMAGE_TAG) ; fi
|
||||
|
||||
# Pulls in all vendor dependencies
|
||||
.PHONY: dep
|
||||
dep:
|
||||
$(call run-in-test-client,dep ensure -v)
|
||||
|
||||
# Pulls in all vendor dependencies (local version)
|
||||
.PHONY: dep-local
|
||||
dep-local:
|
||||
dep ensure -v
|
||||
|
||||
# Pulls in all unvendored dependencies
|
||||
.PHONY: dep-ensure
|
||||
dep-ensure:
|
||||
$(call run-in-test-client,dep ensure -no-vendor -v)
|
||||
|
||||
# Pulls in all unvendored dependencies (local version)
|
||||
.PHONY: dep-ensure-local
|
||||
dep-ensure-local:
|
||||
dep ensure -no-vendor
|
||||
|
||||
# Runs dep check in a container to ensure Gopkg.lock is up-to-date with dependencies
|
||||
.PHONY: dep-check
|
||||
dep-check:
|
||||
$(call run-in-test-client,make dep-check-local)
|
||||
|
||||
# Runs dep check locally to ensure Gopkg.lock is up-to-date with dependencies
|
||||
.PHONY: dep-check-local
|
||||
dep-check-local:
|
||||
if ! dep check -skip-vendor; then echo "Please make sure Gopkg.lock is up-to-date - see https://argoproj.github.io/argo-cd/developer-guide/faq/#why-does-the-build-step-fail"; exit 1; fi
|
||||
|
||||
# Deprecated - replace by install-local-tools
|
||||
.PHONY: install-lint-tools
|
||||
install-lint-tools:
|
||||
./hack/install.sh lint-tools
|
||||
|
||||
# Run linter on the code
|
||||
.PHONY: lint
|
||||
lint:
|
||||
golangci-lint --version
|
||||
golangci-lint run --fix --verbose
|
||||
$(call run-in-test-client,make lint-local)
|
||||
|
||||
# Run linter on the code (local version)
|
||||
.PHONY: lint-local
|
||||
lint-local:
|
||||
golangci-lint --version
|
||||
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
|
||||
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
|
||||
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose --timeout 300s
|
||||
|
||||
.PHONY: lint-ui
|
||||
lint-ui:
|
||||
$(call run-in-test-client,make lint-ui-local)
|
||||
|
||||
.PHONY: lint-ui-local
|
||||
lint-ui-local:
|
||||
cd ui && yarn lint
|
||||
|
||||
# Build all Go code
|
||||
.PHONY: build
|
||||
build:
|
||||
go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
|
||||
mkdir -p $(GOCACHE)
|
||||
$(call run-in-test-client, make build-local)
|
||||
|
||||
# Build all Go code (local version)
|
||||
.PHONY: build-local
|
||||
build-local:
|
||||
go build -p 1 -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
|
||||
|
||||
# Run all unit tests
|
||||
#
|
||||
# If TEST_MODULE is set (to fully qualified module name), only this specific
|
||||
# module will be tested.
|
||||
.PHONY: test
|
||||
test:
|
||||
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`
|
||||
mkdir -p $(GOCACHE)
|
||||
$(call run-in-test-client,make TEST_MODULE=$(TEST_MODULE) test-local)
|
||||
|
||||
# Run all unit tests (local version)
|
||||
.PHONY: test-local
|
||||
test-local:
|
||||
if test "$(TEST_MODULE)" = ""; then \
|
||||
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
|
||||
else \
|
||||
./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
|
||||
fi
|
||||
|
||||
# Run the E2E test suite. E2E test servers (see start-e2e target) must be
|
||||
# started before.
|
||||
.PHONY: test-e2e
|
||||
test-e2e:
|
||||
./hack/test.sh -timeout 15m ./test/e2e
|
||||
test-e2e:
|
||||
$(call exec-in-test-server,make test-e2e-local)
|
||||
|
||||
# Run the E2E test suite (local version)
|
||||
.PHONY: test-e2e-local
|
||||
test-e2e-local: cli
|
||||
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
|
||||
NO_PROXY=* ./hack/test.sh -timeout 15m -v ./test/e2e
|
||||
|
||||
# Spawns a shell in the test server container for debugging purposes
|
||||
debug-test-server:
|
||||
$(call run-in-test-server,/bin/bash)
|
||||
|
||||
# Spawns a shell in the test client container for debugging purposes
|
||||
debug-test-client:
|
||||
$(call run-in-test-client,/bin/bash)
|
||||
|
||||
# Starts e2e server in a container
|
||||
.PHONY: start-e2e
|
||||
start-e2e: cli
|
||||
killall goreman || true
|
||||
# check we can connect to Docker to start Redis
|
||||
start-e2e:
|
||||
docker version
|
||||
mkdir -p ${GOCACHE}
|
||||
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-e2e-local)
|
||||
|
||||
# Starts e2e server locally (or within a container)
|
||||
.PHONY: start-e2e-local
|
||||
start-e2e-local:
|
||||
kubectl create ns argocd-e2e || true
|
||||
kubectl config set-context --current --namespace=argocd-e2e
|
||||
kustomize build test/manifests/base | kubectl apply -f -
|
||||
@@ -189,7 +359,9 @@ start-e2e: cli
|
||||
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
|
||||
ARGOCD_E2E_DISABLE_AUTH=false \
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
goreman start
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
ARGOCD_E2E_TEST=true \
|
||||
goreman -f $(ARGOCD_PROCFILE) start
|
||||
|
||||
# Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
|
||||
.PHONY: clean-debug
|
||||
@@ -202,17 +374,28 @@ clean: clean-debug
|
||||
|
||||
.PHONY: start
|
||||
start:
|
||||
killall goreman || true
|
||||
# check we can connect to Docker to start Redis
|
||||
docker version
|
||||
kubectl create ns argocd || true
|
||||
kubens argocd
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
goreman start
|
||||
$(call run-in-test-server,make ARGOCD_PROCFILE=test/container/Procfile start-local ARGOCD_START=${ARGOCD_START})
|
||||
|
||||
# Starts a local instance of ArgoCD
|
||||
.PHONY: start-local
|
||||
start-local:
|
||||
# check we can connect to Docker to start Redis
|
||||
killall goreman || true
|
||||
kubectl create ns argocd || true
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
ARGOCD_IN_CI=false \
|
||||
ARGOCD_E2E_TEST=false \
|
||||
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
|
||||
|
||||
# Runs pre-commit validaiton with the virtualized toolchain
|
||||
.PHONY: pre-commit
|
||||
pre-commit: dep-ensure codegen build lint test
|
||||
|
||||
# Runs pre-commit validation with the local toolchain
|
||||
.PHONY: pre-commit-local
|
||||
pre-commit-local: dep-ensure-local codegen-local build-local lint-local test-local
|
||||
|
||||
.PHONY: release-precheck
|
||||
release-precheck: manifests
|
||||
@if [ "$(GIT_TREE_STATE)" != "clean" ]; then echo 'git tree state is $(GIT_TREE_STATE)' ; exit 1; fi
|
||||
@@ -237,4 +420,37 @@ lint-docs:
|
||||
|
||||
.PHONY: publish-docs
|
||||
publish-docs: lint-docs
|
||||
mkdocs gh-deploy
|
||||
mkdocs gh-deploy
|
||||
|
||||
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
verify-kube-connect:
|
||||
$(call run-in-test-client,kubectl version)
|
||||
|
||||
# Show the Go version of local and virtualized environments
|
||||
.PHONY: show-go-version
|
||||
show-go-version:
|
||||
@echo -n "Local Go version: "
|
||||
@go version
|
||||
@echo -n "Docker Go version: "
|
||||
$(call run-in-test-client,go version)
|
||||
|
||||
# Installs all tools required to build and test ArgoCD locally
|
||||
.PHONY: install-tools-local
|
||||
install-tools-local:
|
||||
./hack/install.sh dep-linux
|
||||
./hack/install.sh packr-linux
|
||||
./hack/install.sh kubectl-linux
|
||||
./hack/install.sh ksonnet-linux
|
||||
./hack/install.sh helm2-linux
|
||||
./hack/install.sh helm-linux
|
||||
./hack/install.sh codegen-tools
|
||||
./hack/install.sh codegen-go-tools
|
||||
./hack/install.sh lint-tools
|
||||
|
||||
.PHONY: dep-ui
|
||||
dep-ui:
|
||||
$(call run-in-test-client,make dep-ui-local)
|
||||
|
||||
dep-ui-local:
|
||||
cd ui && yarn install
|
||||
|
||||
8
OWNERS
8
OWNERS
@@ -1,12 +1,12 @@
|
||||
owners:
|
||||
- alexec
|
||||
- alexmt
|
||||
- jessesuen
|
||||
|
||||
reviewers:
|
||||
- jannfis
|
||||
|
||||
approvers:
|
||||
- alexec
|
||||
- alexmt
|
||||
- dthomson25
|
||||
- jannfis
|
||||
- jessesuen
|
||||
- mayzhang2000
|
||||
- rachelwang20
|
||||
|
||||
2
Procfile
2
Procfile
@@ -1,6 +1,6 @@
|
||||
controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --staticassets ui/dist/app"
|
||||
dex: sh -c "go run github.com/argoproj/argo-cd/cmd/argocd-util gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/dexidp/dex:v2.14.0 serve /dex.yaml"
|
||||
dex: sh -c "go run github.com/argoproj/argo-cd/cmd/argocd-util gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/dexidp/dex:v2.22.0 serve /dex.yaml"
|
||||
redis: docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} redis:5.0.3-alpine --save "" --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}
|
||||
repo-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-repo-server/main.go --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
|
||||
57
README.md
57
README.md
@@ -12,66 +12,17 @@ Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.
|
||||
|
||||
## Why Argo CD?
|
||||
|
||||
Application definitions, configurations, and environments should be declarative and version controlled.
|
||||
|
||||
Application deployment and lifecycle management should be automated, auditable, and easy to understand.
|
||||
|
||||
1. Application definitions, configurations, and environments should be declarative and version controlled.
|
||||
1. Application deployment and lifecycle management should be automated, auditable, and easy to understand.
|
||||
|
||||
## Who uses Argo CD?
|
||||
|
||||
Organizations below are **officially** using Argo CD. Please send a PR with your organization name if you are using Argo CD.
|
||||
|
||||
1. [127Labs](https://127labs.com/)
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
|
||||
1. [Baloise](https://www.baloise.com)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [Celonis](https://www.celonis.com/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
|
||||
1. [Cybozu](https://cybozu-global.com)
|
||||
1. [EDF Renewables](https://www.edf-re.com/)
|
||||
1. [Elium](https://www.elium.com)
|
||||
1. [END.](https://www.endclothing.com/)
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Future PLC](https://www.futureplc.com/)
|
||||
1. [GMETRI](https://gmetri.com/)
|
||||
1. [hipages](https://hipages.com.au/)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [KintoHub](https://www.kintohub.com/)
|
||||
1. [KompiTech GmbH](https://www.kompitech.com/)
|
||||
1. [Lytt](https://www.lytt.co/)
|
||||
1. [Major League Baseball](https://mlb.com)
|
||||
1. [Mambu](https://www.mambu.com/)
|
||||
1. [Max Kelsen](https://www.maxkelsen.com/)
|
||||
1. [Mirantis](https://mirantis.com/)
|
||||
1. [OpenSaaS Studio](https://opensaas.studio)
|
||||
1. [Optoro](https://www.optoro.com/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
1. [Pipefy](https://www.pipefy.com/)
|
||||
1. [Riskified](https://www.riskified.com/)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Syncier](https://syncier.com/)
|
||||
1. [Tesla](https://tesla.com/)
|
||||
1. [Tiger Analytics](https://www.tigeranalytics.com/)
|
||||
1. [tZERO](https://www.tzero.com/)
|
||||
1. [Ticketmaster](https://ticketmaster.com)
|
||||
1. [Twilio SendGrid](https://sendgrid.com)
|
||||
1. [Yieldlab](https://www.yieldlab.de/)
|
||||
1. [UBIO](https://ub.io/)
|
||||
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
|
||||
1. [Viaduct](https://www.viaduct.ai/)
|
||||
1. [Volvo Cars](https://www.volvocars.com/)
|
||||
1. [Walkbase](https://www.walkbase.com/)
|
||||
[Official Argo CD user list](USERS.md)
|
||||
|
||||
## Documentation
|
||||
|
||||
To learn more about Argo CD [go to the complete documentation](https://argoproj.github.io/argo-cd/).
|
||||
Check live demo at https://cd.apps.argoproj.io/.
|
||||
|
||||
## Community Blogs and Presentations
|
||||
|
||||
|
||||
8
SECURITY_CONTACTS
Normal file
8
SECURITY_CONTACTS
Normal file
@@ -0,0 +1,8 @@
|
||||
# Defined below are the security contacts for this repo.
|
||||
#
|
||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||
# INSTRUCTIONS AT https://argoproj.github.io/argo-cd/security_considerations/#reporting-vulnerabilities
|
||||
|
||||
alexmt
|
||||
edlee2121
|
||||
jessesuen
|
||||
60
USERS.md
Normal file
60
USERS.md
Normal file
@@ -0,0 +1,60 @@
|
||||
## Who uses Argo CD?
|
||||
|
||||
As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization name if you are using Argo CD.
|
||||
|
||||
Currently, the following organizations are **officially** using Argo CD:
|
||||
|
||||
1. [127Labs](https://127labs.com/)
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH ](https://www.arz.at/)
|
||||
1. [Baloise](https://www.baloise.com)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
1. [CARFAX](https://www.carfax.com)
|
||||
1. [Celonis](https://www.celonis.com/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
|
||||
1. [Cybozu](https://cybozu-global.com)
|
||||
1. [EDF Renewables](https://www.edf-re.com/)
|
||||
1. [Elium](https://www.elium.com)
|
||||
1. [END.](https://www.endclothing.com/)
|
||||
1. [Fave](https://myfave.com)
|
||||
1. [Future PLC](https://www.futureplc.com/)
|
||||
1. [GMETRI](https://gmetri.com/)
|
||||
1. [Healy](https://www.healyworld.net)
|
||||
1. [hipages](https://hipages.com.au/)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [KintoHub](https://www.kintohub.com/)
|
||||
1. [KompiTech GmbH](https://www.kompitech.com/)
|
||||
1. [Lytt](https://www.lytt.co/)
|
||||
1. [Major League Baseball](https://mlb.com)
|
||||
1. [Mambu](https://www.mambu.com/)
|
||||
1. [Max Kelsen](https://www.maxkelsen.com/)
|
||||
1. [Mirantis](https://mirantis.com/)
|
||||
1. [MOO Print](https://www.moo.com/)
|
||||
1. [OpenSaaS Studio](https://opensaas.studio)
|
||||
1. [Optoro](https://www.optoro.com/)
|
||||
1. [Peloton Interactive](https://www.onepeloton.com/)
|
||||
1. [Pipefy](https://www.pipefy.com/)
|
||||
1. [Prudential](https://prudential.com.sg)
|
||||
1. [Red Hat](https://www.redhat.com/)
|
||||
1. [Robotinfra](https://www.robotinfra.com)
|
||||
1. [Riskified](https://www.riskified.com/)
|
||||
1. [Saildrone](https://www.saildrone.com/)
|
||||
1. [Saloodo! GmbH](https://www.saloodo.com)
|
||||
1. [Syncier](https://syncier.com/)
|
||||
1. [Tesla](https://tesla.com/)
|
||||
1. [ThousandEyes](https://www.thousandeyes.com/)
|
||||
1. [Ticketmaster](https://ticketmaster.com)
|
||||
1. [Tiger Analytics](https://www.tigeranalytics.com/)
|
||||
1. [Twilio SendGrid](https://sendgrid.com)
|
||||
1. [tZERO](https://www.tzero.com/)
|
||||
1. [UBIO](https://ub.io/)
|
||||
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
|
||||
1. [Viaduct](https://www.viaduct.ai/)
|
||||
1. [Volvo Cars](https://www.volvocars.com/)
|
||||
1. [Walkbase](https://www.walkbase.com/)
|
||||
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
|
||||
1. [Yieldlab](https://www.yieldlab.de/)
|
||||
1. [MTN Group](https://www.mtn.com/)
|
||||
@@ -1,22 +1,24 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="131" height="20">
|
||||
<linearGradient id="b" x2="0" y2="100%">
|
||||
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
|
||||
<stop offset="1" stop-opacity=".1"/>
|
||||
</linearGradient>
|
||||
<clipPath id="a">
|
||||
<rect width="131" height="20" rx="3" fill="#fff"/>
|
||||
<svg width="131" height="20" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" >
|
||||
<defs>
|
||||
<filter id="dropShadow">
|
||||
<feDropShadow dx="0.2" dy="0.4" stdDeviation="0.2" flood-color="#333" flood-opacity="0.5"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<clipPath id="roundedCorners">
|
||||
<rect width="100%" height="100%" rx="3" opacity="1" />
|
||||
</clipPath>
|
||||
<g clip-path="url(#a)">
|
||||
<path id="leftPath" fill="#555" d="M0 0h74v20H0z"/>
|
||||
<path id="rightPath" fill="#4c1" d="M74 0h57v20H74z"/>
|
||||
<path fill="url(#b)" d="M0 0h131v20H0z"/>
|
||||
|
||||
<g clip-path="url(#roundedCorners)">
|
||||
<rect id="leftRect" fill="#555" x="0" y="0" width="74" height="20" />
|
||||
<rect id="rightRect" fill="#4c1" x="74" y="0" width="57" height="20" />
|
||||
<rect id="revisionRect" fill="#4c1" x="131" y="0" width="62" height="20" display="none"/>
|
||||
</g>
|
||||
<g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="90">
|
||||
|
||||
<g fill="#fff" style="filter: url(#dropShadow);" text-anchor="middle" font-family="DejaVu Sans, sans-serif" font-size="90">
|
||||
<image x="5" y="3" width="14" height="14" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB8AAAAeCAYAAADU8sWcAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAACXBIWXMAAABPAAAATwFjiv3XAAACC2lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KD0UqkwAACpZJREFUSA11VnmQFcUZ//qY4527by92WWTXwHKqEJDDg0MQUioKSWopK4oaS9RAlMofUSPGbFlBSaViKomgQSNmIdECkUS8ophdozEJArKoKCsgIFlY2Pu9N29mero7X78FxCR2Vb8309PTv+/4fb9vCHz1II2Nm+jmzYul2bJsdlMyO7LyPGlZtYqQck1ZHCgwLZWiROeJgB7bDzpYb/7o0y/emzXv4Pts8+ZGBUC0uf/vQf57wdw3NTVRnOYFfXvj6pJcadkUmbDGRoxVSEItSYAQQrUmRBP81VoRpkFTJUMuZA/3g/28q3dn89b7u885D4348vgf8NPAxY033LJmSlDizlSOU94f6UhoHaZdC/3QCHWON2gDYBhAYyRAWgyD4QSi1815f29++vv/+CoD2Lm2nAFGl8mndzyxMMgk5/iW6xzPi/xVk+oyE8+vLNt14FR/0uYW14ox0IwUJ4ZAaUpAaaBEYByiyLbikWWNnnThvPIxu+L717auVeb81tbWsyk4C34u8I3LnlhcSCameMzykg7TwzOJGIYWQj+M+voLYcSo/hxY1E65FECUq4G4RFP0nSjMCMVAIIKUnOHkw90L6qq/vXvbh02trV8yoBh2E0NMY9GiG+58fIGXTF6epyw7JOnamZQbz/tCnDrVVzgqqdwTT0ZTdWDN0wPpMh3ZPZqHLSw98C7Y4RwtnAodsTwGAg0ppkcxrlkYJFID+Z0bn/zelsFImzQRXfScNJFiOG689dcT/FT6GzlqedVJTHRpPH6yz/PyfTlx2IrLCpvSdWrv5OXkX9fOJB/Mnaz3XzItap+yMDoyZgEE7F1SceIghryeRNwnTBkEqhRIxiLF6bCpDXNybW2v/ruxcTzbt2+zZmfCvWT+zxNhbek3cxaPlyYsXVWSSHT25rxC3o+OWK6aDn7sZ3r79ZNY28w45Esxuhw5RjmVvEQPlIwgR8bOiE7VdrPaA2+TeGEEhFbhtAEmG5pzk5WqqbWTPv7jC8sLpgxNSRWZrUeWTAgor7EY9ytTiURvzg8GskGU5xxKkc33
yDcX1yc7x3ijLxF+zZgoEljgCrSiXOWIG/WGrjifHxl1V9iyeKwW1lHNo5SKmDmcYinivwhjVpkor55sQj9+/D4MO9bpLW8RN6zJzMvbPDUkHaecE368N1+IUUJfIm5hPf1w2kTedlm/LgntmqEWEQGT3ScJYUh2NIxiiWupSUFasor1VgzXvPATfv6BC3Roq0E9MLqAXKQcAe1rqi/dv+q3DwQUw6djY6tiAaG1jLEw7nAn64WhcStPqZqpA6chap8aage077GwbQcJDx8Awq3T3DHlbeijUHWwytHFuuizCTergdQ+YocxLRHVPNcESRAJxqo60nbSvExvvvWx23MCVvucDyRt5hitwDRHHG09pZiaT7MlSUdUhtQGZttYRdwA4WnFQjFnDA4LX7UdIlgMYpaouBgGMnuBKVN/ZhgBRCMVynNAHOfRm777q4UcrW3EZxcEnD/kclYlpVaFKFIZrIRtluvdls/G9InnLZ9fpLXoJjwZAzZkDJ5mjkIj8HTCKETH9oD0eoikMSDQ5cTrLrUg4QoaFlxUH5MdYy6yAFAaYBHed6ODkMPOkMcFZggZKa0SkaL/jMeDpq5D1Vez9693V/wNUg1jQRY8yLX8BcSba8AaOgyBOdqtQBz7DNj8lZCeNY9SN6aiD3bTOc89s2iliDVvKa3uGxf6doQAxYE2oBbm0HQPG5IxqKhOg9ZpSSQj+pDU+jv5t7913spV5c70GcqLJwgMGw6ZJUuBLbgHPW0D4rgQdewDvnAllN58B0D918AvzVBn3jWi/p4Hq2/s+et1URgBRlVRMGVvBtITfwyuyYPAoIRmWSIrEqjZW1Ml/qN7dtRWzptfBzXDoaeri6TicWhes8Zsg9iMOaAyU0D3fw4qdSEkrryquP77xx+HZCwG3T09HEZfCBUXTxh125H9VZ9SHp1OfZF4aEWI9MM1AhmiVCmaIwV6a2F5ge2q8s7OBB8ytPiOZduw6pFHYOS4cUUQGsNWXj4MVLYdaGU9UAQ0o2H8eHh49WpAITReKqdmGC0PsvF2JB4Gu+i5RqKg5JSi82mOnu9GYGEhwbFjSOXadJII7Pa6Ed3hwU8igAU8lU7D/ffdZ1pFEUT19YI+sR9oZgLIjo8h6usDu2oIzJpxOc4ZphKQtoqFxw6LzxIV/RMxkabSUPAJRz3A5oc50x9T7Lf3xkR0R0xGSS+IRIFQOUUUkqvGXdR5tGXnLmjbYXI0OLCJykIBvFe2ABVHgCQqgMpjkH9xE9IY7cSgGd0w+8mOVjiy68DOh+sauusiwSM8F11nlhCm39/SvP7ux4qNZVfba/0TJ183Is9opWtxEXe5E/cC+Y5bf+CilmczqaC/JigEOjjYTrwtzwB8tAn4kJFGMIEmy0B98gb4B7tAYimHncd19ObLrP25rW0/rb76pQqsoKTEkKJgoT64biCObVx35xvGGz67qYW3Nl0RWYH/ftxmo7pyfpSoKFFjEiLxhE73jXaueHXlGw+PZ68rS8pQ01QlodUjTqcAfcQo8qGjQB36EwQfbFAuF8wjZbmNtStefZ4nC0uibKIHez92AeZEUln5wocGuLGxyUbg2cVEFp5a9pFz11MH/ZCP6M0V8gmbxWZL35K2paFmUmjDgCXAMQkjqv8wgFMGBFVSCw8g1wWsrB5IBrTDAvB0eaAkhVHCeGzSQDUm3bE9/1hs/d7dBhy5aSqAaNNWN6Mvdn9+e0KGfl8hZCd6vBwqABmISKSkCgkqr/a6QMWrwLpsKaYWhfrwi1ikfWBNvQmUhXItAlznICMlcopFFNXcEA25wBw/EIms2L4O1onZs5u46abFUsILDWhAc/OKo7H+/OtuEFqaUcsmIE8CR47zEMsSuJvRQcceODVmOqR/uAHKftAC6R+9AKcmzgVxci9qexwLTIDZ34+MR6FVErXECXw7kc+3/G7j8gPG0dbWJmQnum1+zDj3U2rJ0rWzvFRynu/YskMTb5vetmwI7xyeg7R0
g072ynEGwa0PwZTRDbDn0GGI1j0Ii2pCKPCyqJTl+eei9tOl5Kr1eU2seuG5ZMBv2fjUIMkQymAWC6jouQE3jdlYZa43PLnsLbcn++eYVwi6FCQktTCEmO3QAx2rhOllAl6bOwsahg2FDTMvhRklWVSUMkxcWEyHJFbUqyCW9AtRSV//K18AF4XmbOWeBTegCF78ujTXf3hm+XvTeo49G8/67Sg+A0a0OGc6CDzIlFbDL3+8GPYuuxLWP7AYysprIQx9YNjdzAglyVkD3qGvd3dsWvv0infM2qBj53zr49rZsJsNZwa2WeTTFxtP3L1oe1VCzh3AWqOM2FJJsFBwbMuCUAgQyAqGrVVJ7Zdwyz2RZS/X/GbrguJ5eNYgyhfnncH5v+DmYcft18Y1T5S7fl9jPMN+4aM6IpeQPmgxThNA5AnuJFhIeI2tHafiNqEUW1XQL+8NrfRznENP1drNuTOA5/5/KezmAZ4zaJBSdVaUvQk5PsNTeqfrMsKQOui5LGKibBAjmKgSRk/RIMlc8BiWCLbI95Dx05jybrMkfsh+xfgPf+hH0AC4OlsAAAAASUVORK5CYII="/>
|
||||
|
||||
<text id="leftText1" x="435" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="470"></text>
|
||||
<text id="leftText2" x="435" y="140" transform="scale(.1)" textLength="470"></text>
|
||||
|
||||
<text id="rightText1" x="995" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="470"></text>
|
||||
<text id="rightText1" x="995" y="140" transform="scale(.1)" textLength="470"></text></g>
|
||||
<text id="leftText" x="435" y="140" transform="scale(.1)" textLength="470"></text>
|
||||
<text id="rightText" x="995" y="140" transform="scale(.1)" textLength="470"></text>
|
||||
<text id="revisionText" x="1550" y="140" font-family="monospace" transform="scale(.1)" font-size="110" display="none"></text>
|
||||
</g>
|
||||
</svg>
|
||||
|
||||
|
Before Width: | Height: | Size: 5.6 KiB After Width: | Height: | Size: 5.6 KiB |
@@ -11,6 +11,7 @@ p, role:readonly, certificates, get, *, allow
|
||||
p, role:readonly, clusters, get, *, allow
|
||||
p, role:readonly, repositories, get, *, allow
|
||||
p, role:readonly, projects, get, *, allow
|
||||
p, role:readonly, accounts, get, *, allow
|
||||
|
||||
p, role:admin, applications, create, */*, allow
|
||||
p, role:admin, applications, update, */*, allow
|
||||
@@ -30,6 +31,7 @@ p, role:admin, repositories, delete, *, allow
|
||||
p, role:admin, projects, create, *, allow
|
||||
p, role:admin, projects, update, *, allow
|
||||
p, role:admin, projects, delete, *, allow
|
||||
p, role:admin, accounts, update, *, allow
|
||||
|
||||
g, role:admin, role:readonly
|
||||
g, admin, role:admin
|
||||
|
||||
|
@@ -16,6 +16,22 @@
|
||||
"version": "version not set"
|
||||
},
|
||||
"paths": {
|
||||
"/api/v1/account": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"AccountService"
|
||||
],
|
||||
"operationId": "ListAccounts",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "(empty)",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/accountAccountsList"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/account/can-i/{resource}/{action}/{subresource}": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -79,13 +95,99 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/account/{name}": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"AccountService"
|
||||
],
|
||||
"operationId": "GetAccount",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "(empty)",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/accountAccount"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/account/{name}/token": {
|
||||
"post": {
|
||||
"tags": [
|
||||
"AccountService"
|
||||
],
|
||||
"operationId": "CreateTokenMixin9",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/accountCreateTokenRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "(empty)",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/accountCreateTokenResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/account/{name}/token/{id}": {
|
||||
"delete": {
|
||||
"tags": [
|
||||
"AccountService"
|
||||
],
|
||||
"operationId": "DeleteTokenMixin9",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "(empty)",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/accountEmptyResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "List returns list of applications",
|
||||
"operationId": "ListMixin8",
|
||||
"operationId": "List",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -135,7 +237,7 @@
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "Create creates an application",
|
||||
"operationId": "CreateMixin8",
|
||||
"operationId": "Create",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
@@ -162,7 +264,7 @@
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "Update updates an application",
|
||||
"operationId": "UpdateMixin8",
|
||||
"operationId": "Update",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -293,7 +395,7 @@
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "Get returns an application by name",
|
||||
"operationId": "GetMixin8",
|
||||
"operationId": "Get",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -343,7 +445,7 @@
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "Delete deletes an application",
|
||||
"operationId": "DeleteMixin8",
|
||||
"operationId": "Delete",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -982,7 +1084,7 @@
|
||||
"ClusterService"
|
||||
],
|
||||
"summary": "List returns list of clusters",
|
||||
"operationId": "List",
|
||||
"operationId": "ListMixin4",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1004,7 +1106,7 @@
|
||||
"ClusterService"
|
||||
],
|
||||
"summary": "Create creates a cluster",
|
||||
"operationId": "Create",
|
||||
"operationId": "CreateMixin4",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
@@ -1031,7 +1133,7 @@
|
||||
"ClusterService"
|
||||
],
|
||||
"summary": "Update updates a cluster",
|
||||
"operationId": "Update",
|
||||
"operationId": "UpdateMixin4",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1064,7 +1166,7 @@
|
||||
"ClusterService"
|
||||
],
|
||||
"summary": "Get returns a cluster by server address",
|
||||
"operationId": "GetMixin2",
|
||||
"operationId": "GetMixin4",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1087,7 +1189,7 @@
|
||||
"ClusterService"
|
||||
],
|
||||
"summary": "Delete deletes a cluster",
|
||||
"operationId": "Delete",
|
||||
"operationId": "DeleteMixin4",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1137,7 +1239,7 @@
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "List returns list of projects",
|
||||
"operationId": "ListMixin6",
|
||||
"operationId": "ListMixin5",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1159,7 +1261,7 @@
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "Create a new project.",
|
||||
"operationId": "CreateMixin6",
|
||||
"operationId": "CreateMixin5",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
@@ -1186,7 +1288,7 @@
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "Get returns a project by name",
|
||||
"operationId": "GetMixin6",
|
||||
"operationId": "GetMixin5",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1209,7 +1311,7 @@
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "Delete deletes a project",
|
||||
"operationId": "DeleteMixin6",
|
||||
"operationId": "DeleteMixin5",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1284,7 +1386,7 @@
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "Update updates a project",
|
||||
"operationId": "UpdateMixin6",
|
||||
"operationId": "UpdateMixin5",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1803,7 +1905,7 @@
|
||||
"SettingsService"
|
||||
],
|
||||
"summary": "Get returns Argo CD settings",
|
||||
"operationId": "Get",
|
||||
"operationId": "GetMixin7",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "(empty)",
|
||||
@@ -1885,6 +1987,41 @@
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"accountAccount": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capabilities": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"format": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"tokens": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/accountToken"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"accountAccountsList": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/accountAccount"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"accountCanIResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1893,12 +2030,58 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"accountCreateTokenRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"expiresIn": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"title": "expiresIn represents a duration in seconds"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"accountCreateTokenResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"token": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"accountEmptyResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"accountToken": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"expiresAt": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"issuedAt": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
"accountUpdatePasswordRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"currentPassword": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"newPassword": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -2159,6 +2342,12 @@
|
||||
"appLabelKey": {
|
||||
"type": "string"
|
||||
},
|
||||
"configManagementPlugins": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ConfigManagementPlugin"
|
||||
}
|
||||
},
|
||||
"dexConfig": {
|
||||
"$ref": "#/definitions/clusterDexConfig"
|
||||
},
|
||||
@@ -2171,6 +2360,12 @@
|
||||
"kustomizeOptions": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeOptions"
|
||||
},
|
||||
"kustomizeVersions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"oidcConfig": {
|
||||
"$ref": "#/definitions/clusterOIDCConfig"
|
||||
},
|
||||
@@ -2192,6 +2387,10 @@
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
},
|
||||
"userLoginsDisabled": {
|
||||
"type": "boolean",
|
||||
"format": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -2241,6 +2440,9 @@
|
||||
"format": "int64",
|
||||
"title": "expiresIn represents a duration in seconds"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"project": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -2301,6 +2503,13 @@
|
||||
"type": "object",
|
||||
"title": "HelmAppSpec contains helm app name in source repo",
|
||||
"properties": {
|
||||
"fileParameters": {
|
||||
"type": "array",
|
||||
"title": "helm file parameters",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1HelmFileParameter"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -2975,6 +3184,13 @@
|
||||
"$ref": "#/definitions/v1GroupKind"
|
||||
}
|
||||
},
|
||||
"namespaceResourceWhitelist": {
|
||||
"type": "array",
|
||||
"title": "NamespaceResourceWhitelist contains list of whitelisted namespace level resources",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1GroupKind"
|
||||
}
|
||||
},
|
||||
"orphanedResources": {
|
||||
"$ref": "#/definitions/v1alpha1OrphanedResourcesMonitorSettings"
|
||||
},
|
||||
@@ -3118,6 +3334,13 @@
|
||||
"type": "object",
|
||||
"title": "ApplicationSourceHelm holds helm specific options",
|
||||
"properties": {
|
||||
"fileParameters": {
|
||||
"type": "array",
|
||||
"title": "FileParameters are file parameters to the helm template",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1HelmFileParameter"
|
||||
}
|
||||
},
|
||||
"parameters": {
|
||||
"type": "array",
|
||||
"title": "Parameters are parameters to the helm template",
|
||||
@@ -3204,6 +3427,10 @@
|
||||
"nameSuffix": {
|
||||
"type": "string",
|
||||
"title": "NameSuffix is a suffix appended to resources for kustomize apps"
|
||||
},
|
||||
"version": {
|
||||
"type": "string",
|
||||
"title": "Version contains optional Kustomize version"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -3425,6 +3652,24 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1Command": {
|
||||
"type": "object",
|
||||
"title": "Command holds binary path and arguments list",
|
||||
"properties": {
|
||||
"args": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"command": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ComparedTo": {
|
||||
"type": "object",
|
||||
"title": "ComparedTo contains application source and target which was used for resources comparison",
|
||||
@@ -3437,6 +3682,21 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ConfigManagementPlugin": {
|
||||
"type": "object",
|
||||
"title": "ConfigManagementPlugin contains config management plugin configuration",
|
||||
"properties": {
|
||||
"generate": {
|
||||
"$ref": "#/definitions/v1alpha1Command"
|
||||
},
|
||||
"init": {
|
||||
"$ref": "#/definitions/v1alpha1Command"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ConnectionState": {
|
||||
"type": "object",
|
||||
"title": "ConnectionState contains information about remote resource connection state",
|
||||
@@ -3476,6 +3736,20 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1HelmFileParameter": {
|
||||
"type": "object",
|
||||
"title": "HelmFileParameter is a file parameter to a helm template",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name is the name of the helm parameter"
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"title": "Path is the path value for the helm parameter"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1HelmParameter": {
|
||||
"type": "object",
|
||||
"title": "HelmParameter is a parameter to a helm template",
|
||||
@@ -3531,6 +3805,9 @@
|
||||
"iat": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -3550,6 +3827,18 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KnownTypeField": {
|
||||
"type": "object",
|
||||
"title": "KnownTypeField contains mapping between CRD field and known Kubernetes type",
|
||||
"properties": {
|
||||
"field": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KsonnetParameter": {
|
||||
"type": "object",
|
||||
"title": "KsonnetParameter is a ksonnet component parameter",
|
||||
@@ -3569,6 +3858,10 @@
|
||||
"type": "object",
|
||||
"title": "KustomizeOptions are options for kustomize to use when building manifests",
|
||||
"properties": {
|
||||
"binaryPath": {
|
||||
"type": "string",
|
||||
"title": "BinaryPath holds optional path to kustomize binary"
|
||||
},
|
||||
"buildOptions": {
|
||||
"type": "string",
|
||||
"title": "BuildOptions is a string of build parameters to use when calling `kustomize build`"
|
||||
@@ -3579,11 +3872,29 @@
|
||||
"description": "Operation contains requested operation parameters.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"initiatedBy": {
|
||||
"$ref": "#/definitions/v1alpha1OperationInitiator"
|
||||
},
|
||||
"sync": {
|
||||
"$ref": "#/definitions/v1alpha1SyncOperation"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1OperationInitiator": {
|
||||
"type": "object",
|
||||
"title": "OperationInitiator holds information about the operation initiator",
|
||||
"properties": {
|
||||
"automated": {
|
||||
"description": "Automated is set to true if operation was initiated automatically by the application controller.",
|
||||
"type": "boolean",
|
||||
"format": "boolean"
|
||||
},
|
||||
"username": {
|
||||
"description": "Name of a user who started operation.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1OperationState": {
|
||||
"description": "OperationState contains information about state of currently performing operation on application.",
|
||||
"type": "object",
|
||||
@@ -4006,6 +4317,12 @@
|
||||
},
|
||||
"ignoreDifferences": {
|
||||
"type": "string"
|
||||
},
|
||||
"knownTypeFields": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1KnownTypeField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -4187,6 +4504,13 @@
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"syncOptions": {
|
||||
"type": "array",
|
||||
"title": "SyncOptions provide per-sync sync-options, e.g. Validate=false",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"syncStrategy": {
|
||||
"$ref": "#/definitions/v1alpha1SyncStrategy"
|
||||
}
|
||||
@@ -4233,6 +4557,13 @@
|
||||
"properties": {
|
||||
"automated": {
|
||||
"$ref": "#/definitions/v1alpha1SyncPolicyAutomated"
|
||||
},
|
||||
"syncOptions": {
|
||||
"type": "array",
|
||||
"title": "Options allow youe to specify whole app sync-options",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/go-redis/redis"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@@ -19,13 +21,15 @@ import (
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
"github.com/argoproj/argo-cd/controller"
|
||||
"github.com/argoproj/argo-cd/errors"
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/reposerver/apiclient"
|
||||
cacheutil "github.com/argoproj/argo-cd/util/cache"
|
||||
appstatecache "github.com/argoproj/argo-cd/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/util/cli"
|
||||
"github.com/argoproj/argo-cd/util/diff"
|
||||
"github.com/argoproj/argo-cd/util/kube"
|
||||
"github.com/argoproj/argo-cd/util/settings"
|
||||
"github.com/argoproj/argo-cd/util/stats"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -49,6 +53,7 @@ func newCommand() *cobra.Command {
|
||||
metricsPort int
|
||||
kubectlParallelismLimit int64
|
||||
cacheSrc func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -59,8 +64,7 @@ func newCommand() *cobra.Command {
|
||||
|
||||
config, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
config.QPS = common.K8sClientConfigQPS
|
||||
config.Burst = common.K8sClientConfigBurst
|
||||
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
|
||||
|
||||
kubeClient := kubernetes.NewForConfigOrDie(config)
|
||||
appClient := appclientset.NewForConfigOrDie(config)
|
||||
@@ -78,6 +82,8 @@ func newCommand() *cobra.Command {
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
|
||||
kubectl := &kube.KubectlCmd{}
|
||||
legacyDiffDisabled := os.Getenv("ARGOCD_ENABLE_LEGACY_DIFF") == "false"
|
||||
diff.SetPopulateLegacyDiff(!legacyDiffDisabled)
|
||||
appController, err := controller.NewApplicationController(
|
||||
namespace,
|
||||
settingsMgr,
|
||||
@@ -91,8 +97,10 @@ func newCommand() *cobra.Command {
|
||||
metricsPort,
|
||||
kubectlParallelismLimit)
|
||||
errors.CheckError(err)
|
||||
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
|
||||
|
||||
log.Infof("Application Controller (version: %s) starting (namespace: %s)", common.GetVersion(), namespace)
|
||||
vers := common.GetVersion()
|
||||
log.Infof("Application Controller (version: %s, built: %s) starting (namespace: %s)", vers.Version, vers.BuildDate, namespace)
|
||||
stats.RegisterStackDumper()
|
||||
stats.StartStatsTicker(10 * time.Minute)
|
||||
stats.RegisterHeapDumper("memprofile")
|
||||
@@ -115,8 +123,9 @@ func newCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
|
||||
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", 5, "Specifies timeout between application self heal attempts")
|
||||
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", 20, "Number of allowed concurrent kubectl fork/execs. Any value less the 1 means no limit.")
|
||||
|
||||
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
|
||||
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/go-redis/redis"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
@@ -15,8 +17,8 @@ import (
|
||||
"github.com/argoproj/argo-cd/reposerver"
|
||||
reposervercache "github.com/argoproj/argo-cd/reposerver/cache"
|
||||
"github.com/argoproj/argo-cd/reposerver/metrics"
|
||||
cacheutil "github.com/argoproj/argo-cd/util/cache"
|
||||
"github.com/argoproj/argo-cd/util/cli"
|
||||
"github.com/argoproj/argo-cd/util/stats"
|
||||
"github.com/argoproj/argo-cd/util/tls"
|
||||
)
|
||||
|
||||
@@ -33,6 +35,7 @@ func newCommand() *cobra.Command {
|
||||
metricsPort int
|
||||
cacheSrc func() (*reposervercache.Cache, error)
|
||||
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
|
||||
redisClient *redis.Client
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -47,6 +50,7 @@ func newCommand() *cobra.Command {
|
||||
errors.CheckError(err)
|
||||
|
||||
metricsServer := metrics.NewMetricsServer()
|
||||
cacheutil.CollectMetrics(redisClient, metricsServer)
|
||||
server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, parallelismLimit)
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -72,7 +76,9 @@ func newCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&listenPort, "port", common.DefaultPortRepoServer, "Listen on given port for incoming connections")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
@@ -4,24 +4,28 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/go-redis/redis"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
"github.com/argoproj/argo-cd/errors"
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/server"
|
||||
servercache "github.com/argoproj/argo-cd/server/cache"
|
||||
"github.com/argoproj/argo-cd/util/cli"
|
||||
"github.com/argoproj/argo-cd/util/stats"
|
||||
"github.com/argoproj/argo-cd/util/tls"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewCommand returns a new instance of an argocd command
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
redisClient *redis.Client
|
||||
insecure bool
|
||||
listenPort int
|
||||
metricsPort int
|
||||
@@ -31,6 +35,7 @@ func NewCommand() *cobra.Command {
|
||||
repoServerTimeoutSeconds int
|
||||
staticAssetsDir string
|
||||
baseHRef string
|
||||
rootPath string
|
||||
repoServerAddress string
|
||||
dexServerAddress string
|
||||
disableAuth bool
|
||||
@@ -48,8 +53,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
config, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
config.QPS = common.K8sClientConfigQPS
|
||||
config.Burst = common.K8sClientConfigBurst
|
||||
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
|
||||
|
||||
namespace, _, err := clientConfig.Namespace()
|
||||
errors.CheckError(err)
|
||||
@@ -63,6 +67,13 @@ func NewCommand() *cobra.Command {
|
||||
appclientset := appclientset.NewForConfigOrDie(config)
|
||||
repoclientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)
|
||||
|
||||
if rootPath != "" {
|
||||
if baseHRef != "" && baseHRef != rootPath {
|
||||
log.Warnf("--basehref and --rootpath had conflict: basehref: %s rootpath: %s", baseHRef, rootPath)
|
||||
}
|
||||
baseHRef = rootPath
|
||||
}
|
||||
|
||||
argoCDOpts := server.ArgoCDServerOpts{
|
||||
Insecure: insecure,
|
||||
ListenPort: listenPort,
|
||||
@@ -70,6 +81,7 @@ func NewCommand() *cobra.Command {
|
||||
Namespace: namespace,
|
||||
StaticAssetsDir: staticAssetsDir,
|
||||
BaseHRef: baseHRef,
|
||||
RootPath: rootPath,
|
||||
KubeClientset: kubeclientset,
|
||||
AppClientset: appclientset,
|
||||
RepoClientset: repoclientset,
|
||||
@@ -78,6 +90,7 @@ func NewCommand() *cobra.Command {
|
||||
TLSConfigCustomizer: tlsConfigCustomizer,
|
||||
Cache: cache,
|
||||
XFrameOptions: frameOptions,
|
||||
RedisClient: redisClient,
|
||||
}
|
||||
|
||||
stats.RegisterStackDumper()
|
||||
@@ -98,6 +111,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&insecure, "insecure", false, "Run server without TLS")
|
||||
command.Flags().StringVar(&staticAssetsDir, "staticassets", "", "Static assets directory path")
|
||||
command.Flags().StringVar(&baseHRef, "basehref", "/", "Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /")
|
||||
command.Flags().StringVar(&rootPath, "rootpath", "", "Used if Argo CD is running behind reverse proxy under subpath different from /")
|
||||
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
|
||||
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address")
|
||||
@@ -109,6 +123,8 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.")
|
||||
command.Flags().StringVar(&frameOptions, "x-frame-options", "sameorigin", "Set X-Frame-Options header in HTTP responses to `value`. To disable, set to \"\".")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
|
||||
cacheSrc = servercache.AddCacheFlagsToCmd(command)
|
||||
cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package main
|
||||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -1,4 +1,4 @@
|
||||
package main
|
||||
package commands
|
||||
|
||||
import (
|
||||
"testing"
|
||||
501
cmd/argocd-util/commands/settings.go
Normal file
501
cmd/argocd-util/commands/settings.go
Normal file
@@ -0,0 +1,501 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
"github.com/argoproj/argo-cd/errors"
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/util/argo/normalizers"
|
||||
"github.com/argoproj/argo-cd/util/cli"
|
||||
"github.com/argoproj/argo-cd/util/diff"
|
||||
"github.com/argoproj/argo-cd/util/health"
|
||||
"github.com/argoproj/argo-cd/util/lua"
|
||||
"github.com/argoproj/argo-cd/util/settings"
|
||||
)
|
||||
|
||||
type settingsOpts struct {
|
||||
argocdCMPath string
|
||||
argocdSecretPath string
|
||||
loadClusterSettings bool
|
||||
clientConfig clientcmd.ClientConfig
|
||||
}
|
||||
|
||||
type commandContext interface {
|
||||
createSettingsManager() (*settings.SettingsManager, error)
|
||||
}
|
||||
|
||||
func collectLogs(callback func()) string {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
out := bytes.Buffer{}
|
||||
log.SetOutput(&out)
|
||||
defer log.SetLevel(log.FatalLevel)
|
||||
callback()
|
||||
return out.String()
|
||||
}
|
||||
|
||||
func setSettingsMeta(obj v1.Object) {
|
||||
obj.SetNamespace("default")
|
||||
labels := obj.GetLabels()
|
||||
if labels == nil {
|
||||
labels = make(map[string]string)
|
||||
}
|
||||
labels["app.kubernetes.io/part-of"] = "argocd"
|
||||
obj.SetLabels(labels)
|
||||
}
|
||||
|
||||
func (opts *settingsOpts) createSettingsManager() (*settings.SettingsManager, error) {
|
||||
var argocdCM *corev1.ConfigMap
|
||||
if opts.argocdCMPath == "" && !opts.loadClusterSettings {
|
||||
return nil, fmt.Errorf("either --argocd-cm-path must be provided or --load-cluster-settings must be set to true")
|
||||
} else if opts.argocdCMPath == "" {
|
||||
realClientset, ns, err := opts.getK8sClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
argocdCM, err = realClientset.CoreV1().ConfigMaps(ns).Get(common.ArgoCDConfigMapName, v1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
data, err := ioutil.ReadFile(opts.argocdCMPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(data, &argocdCM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
setSettingsMeta(argocdCM)
|
||||
|
||||
var argocdSecret *corev1.Secret
|
||||
if opts.argocdSecretPath != "" {
|
||||
data, err := ioutil.ReadFile(opts.argocdSecretPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(data, &argocdSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setSettingsMeta(argocdSecret)
|
||||
} else if opts.loadClusterSettings {
|
||||
realClientset, ns, err := opts.getK8sClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
argocdSecret, err = realClientset.CoreV1().Secrets(ns).Get(common.ArgoCDSecretName, v1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
argocdSecret = &corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"admin.password": []byte("test"),
|
||||
"server.secretkey": []byte("test"),
|
||||
},
|
||||
}
|
||||
}
|
||||
setSettingsMeta(argocdSecret)
|
||||
clientset := fake.NewSimpleClientset(argocdSecret, argocdCM)
|
||||
|
||||
manager := settings.NewSettingsManager(context.Background(), clientset, "default")
|
||||
errors.CheckError(manager.ResyncInformers())
|
||||
|
||||
return manager, nil
|
||||
}
|
||||
|
||||
func (opts *settingsOpts) getK8sClient() (*kubernetes.Clientset, string, error) {
|
||||
namespace, _, err := opts.clientConfig.Namespace()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
restConfig, err := opts.clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
realClientset, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return realClientset, namespace, nil
|
||||
}
|
||||
|
||||
func NewSettingsCommand() *cobra.Command {
|
||||
var (
|
||||
opts settingsOpts
|
||||
)
|
||||
|
||||
var command = &cobra.Command{
|
||||
Use: "settings",
|
||||
Short: "Provides set of commands for settings validation and troubleshooting",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
},
|
||||
}
|
||||
log.SetLevel(log.FatalLevel)
|
||||
|
||||
command.AddCommand(NewValidateSettingsCommand(&opts))
|
||||
command.AddCommand(NewResourceOverridesCommand(&opts))
|
||||
|
||||
opts.clientConfig = cli.AddKubectlFlagsToCmd(command)
|
||||
command.PersistentFlags().StringVar(&opts.argocdCMPath, "argocd-cm-path", "", "Path to local argocd-cm.yaml file")
|
||||
command.PersistentFlags().StringVar(&opts.argocdSecretPath, "argocd-secret-path", "", "Path to local argocd-secret.yaml file")
|
||||
command.PersistentFlags().BoolVar(&opts.loadClusterSettings, "load-cluster-settings", false,
|
||||
"Indicates that config map and secret should be loaded from cluster unless local file path is provided")
|
||||
return command
|
||||
}
|
||||
|
||||
type settingValidator func(manager *settings.SettingsManager) (string, error)
|
||||
|
||||
func joinValidators(validators ...settingValidator) settingValidator {
|
||||
return func(manager *settings.SettingsManager) (string, error) {
|
||||
var errorStrs []string
|
||||
var summaries []string
|
||||
for i := range validators {
|
||||
summary, err := validators[i](manager)
|
||||
if err != nil {
|
||||
errorStrs = append(errorStrs, err.Error())
|
||||
}
|
||||
if summary != "" {
|
||||
summaries = append(summaries, summary)
|
||||
}
|
||||
}
|
||||
if len(errorStrs) > 0 {
|
||||
return "", fmt.Errorf("%s", strings.Join(errorStrs, "\n"))
|
||||
}
|
||||
return strings.Join(summaries, "\n"), nil
|
||||
}
|
||||
}
|
||||
|
||||
var validatorsByGroup = map[string]settingValidator{
|
||||
"general": joinValidators(func(manager *settings.SettingsManager) (string, error) {
|
||||
general, err := manager.GetSettings()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ssoProvider := ""
|
||||
if general.DexConfig != "" {
|
||||
if _, err := settings.UnmarshalDexConfig(general.DexConfig); err != nil {
|
||||
return "", fmt.Errorf("invalid dex.config: %v", err)
|
||||
}
|
||||
ssoProvider = "Dex"
|
||||
} else if general.OIDCConfigRAW != "" {
|
||||
if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil {
|
||||
return "", fmt.Errorf("invalid oidc.config: %v", err)
|
||||
}
|
||||
ssoProvider = "OIDC"
|
||||
}
|
||||
var summary string
|
||||
if ssoProvider != "" {
|
||||
summary = fmt.Sprintf("%s is configured", ssoProvider)
|
||||
if general.URL == "" {
|
||||
summary = summary + " ('url' field is missing)"
|
||||
}
|
||||
} else if ssoProvider != "" && general.URL != "" {
|
||||
|
||||
} else {
|
||||
summary = "SSO is not configured"
|
||||
}
|
||||
return summary, nil
|
||||
}, func(manager *settings.SettingsManager) (string, error) {
|
||||
_, err := manager.GetAppInstanceLabelKey()
|
||||
return "", err
|
||||
}, func(manager *settings.SettingsManager) (string, error) {
|
||||
_, err := manager.GetHelp()
|
||||
return "", err
|
||||
}, func(manager *settings.SettingsManager) (string, error) {
|
||||
_, err := manager.GetGoogleAnalytics()
|
||||
return "", err
|
||||
}),
|
||||
"plugins": func(manager *settings.SettingsManager) (string, error) {
|
||||
plugins, err := manager.GetConfigManagementPlugins()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%d plugins", len(plugins)), nil
|
||||
},
|
||||
"kustomize": func(manager *settings.SettingsManager) (string, error) {
|
||||
opts, err := manager.GetKustomizeSettings()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
summary := "default options"
|
||||
if opts.BuildOptions != "" {
|
||||
summary = opts.BuildOptions
|
||||
}
|
||||
if len(opts.Versions) > 0 {
|
||||
summary = fmt.Sprintf("%s (%d versions)", summary, len(opts.Versions))
|
||||
}
|
||||
return summary, err
|
||||
},
|
||||
"repositories": joinValidators(func(manager *settings.SettingsManager) (string, error) {
|
||||
repos, err := manager.GetRepositories()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%d repositories", len(repos)), nil
|
||||
}, func(manager *settings.SettingsManager) (string, error) {
|
||||
creds, err := manager.GetRepositoryCredentials()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%d repository credentials", len(creds)), nil
|
||||
}),
|
||||
"accounts": func(manager *settings.SettingsManager) (string, error) {
|
||||
accounts, err := manager.GetAccounts()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%d accounts", len(accounts)), nil
|
||||
},
|
||||
"resource-overrides": func(manager *settings.SettingsManager) (string, error) {
|
||||
overrides, err := manager.GetResourceOverrides()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%d resource overrides", len(overrides)), nil
|
||||
},
|
||||
}
|
||||
|
||||
func NewValidateSettingsCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var (
|
||||
groups []string
|
||||
)
|
||||
|
||||
var allGroups []string
|
||||
for k := range validatorsByGroup {
|
||||
allGroups = append(allGroups, k)
|
||||
}
|
||||
sort.Slice(allGroups, func(i, j int) bool {
|
||||
return allGroups[i] < allGroups[j]
|
||||
})
|
||||
|
||||
var command = &cobra.Command{
|
||||
Use: "validate",
|
||||
Short: "Validate settings",
|
||||
Long: "Validates settings specified in 'argocd-cm' ConfigMap and 'argocd-secret' Secret",
|
||||
Example: `
|
||||
#Validates all settings in the specified YAML file
|
||||
argocd-util settings validate --argocd-cm-path ./argocd-cm.yaml
|
||||
|
||||
#Validates accounts and plugins settings in Kubernetes cluster of current kubeconfig context
|
||||
argocd-util settings validate --group accounts --group plugins --load-cluster-settings`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
settingsManager, err := cmdCtx.createSettingsManager()
|
||||
errors.CheckError(err)
|
||||
|
||||
if len(groups) == 0 {
|
||||
groups = allGroups
|
||||
}
|
||||
for i, group := range groups {
|
||||
validator := validatorsByGroup[group]
|
||||
|
||||
logs := collectLogs(func() {
|
||||
summary, err := validator(settingsManager)
|
||||
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stdout, "❌ %s\n", group)
|
||||
_, _ = fmt.Fprintf(os.Stdout, "%s\n", err.Error())
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(os.Stdout, "✅ %s\n", group)
|
||||
if summary != "" {
|
||||
_, _ = fmt.Fprintf(os.Stdout, "%s\n", summary)
|
||||
}
|
||||
}
|
||||
})
|
||||
if logs != "" {
|
||||
_, _ = fmt.Fprintf(os.Stdout, "%s\n", logs)
|
||||
}
|
||||
if i != len(groups)-1 {
|
||||
_, _ = fmt.Fprintf(os.Stdout, "\n")
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
command.Flags().StringArrayVar(&groups, "group", nil, fmt.Sprintf(
|
||||
"Optional list of setting groups that have to be validated ( one of: %s)", strings.Join(allGroups, ", ")))
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func NewResourceOverridesCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "resource-overrides",
|
||||
Short: "Troubleshoot resource overrides",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
},
|
||||
}
|
||||
command.AddCommand(NewResourceIgnoreDifferencesCommand(cmdCtx))
|
||||
command.AddCommand(NewResourceActionCommand(cmdCtx))
|
||||
command.AddCommand(NewResourceHealthCommand(cmdCtx))
|
||||
return command
|
||||
}
|
||||
|
||||
func executeResourceOverrideCommand(cmdCtx commandContext, args []string, callback func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride)) {
|
||||
data, err := ioutil.ReadFile(args[0])
|
||||
errors.CheckError(err)
|
||||
|
||||
res := unstructured.Unstructured{}
|
||||
errors.CheckError(yaml.Unmarshal(data, &res))
|
||||
|
||||
settingsManager, err := cmdCtx.createSettingsManager()
|
||||
errors.CheckError(err)
|
||||
|
||||
overrides, err := settingsManager.GetResourceOverrides()
|
||||
errors.CheckError(err)
|
||||
gvk := res.GroupVersionKind()
|
||||
key := gvk.Kind
|
||||
if gvk.Group != "" {
|
||||
key = fmt.Sprintf("%s/%s", gvk.Group, gvk.Kind)
|
||||
}
|
||||
override, hasOverride := overrides[key]
|
||||
if !hasOverride {
|
||||
_, _ = fmt.Printf("No overrides configured for '%s/%s'\n", gvk.Group, gvk.Kind)
|
||||
return
|
||||
}
|
||||
callback(res, override, overrides)
|
||||
}
|
||||
|
||||
func NewResourceIgnoreDifferencesCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "ignore-differences RESOURCE_YAML_PATH",
|
||||
Short: "Renders fields excluded from diffing",
|
||||
Long: "Renders ignored fields using the 'ignoreDifferences' setting specified in the 'resource.customizations' field of 'argocd-cm' ConfigMap",
|
||||
Example: `
|
||||
argocd-util settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) < 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
|
||||
gvk := res.GroupVersionKind()
|
||||
if override.IgnoreDifferences == "" {
|
||||
_, _ = fmt.Printf("Ignore differences are not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
|
||||
return
|
||||
}
|
||||
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides)
|
||||
errors.CheckError(err)
|
||||
|
||||
normalizedRes := res.DeepCopy()
|
||||
logs := collectLogs(func() {
|
||||
errors.CheckError(normalizer.Normalize(normalizedRes))
|
||||
})
|
||||
if logs != "" {
|
||||
_, _ = fmt.Println(logs)
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(&res, normalizedRes) {
|
||||
_, _ = fmt.Printf("No fields are ignored by ignoreDifferences settings: \n%s\n", override.IgnoreDifferences)
|
||||
return
|
||||
}
|
||||
|
||||
_, _ = fmt.Printf("Following fields are ignored:\n\n")
|
||||
_ = diff.PrintDiff(res.GetName(), &res, normalizedRes)
|
||||
})
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func NewResourceHealthCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "health RESOURCE_YAML_PATH",
|
||||
Short: "Assess resource health",
|
||||
Long: "Assess resource health using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap",
|
||||
Example: `
|
||||
argocd-util settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) < 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
|
||||
gvk := res.GroupVersionKind()
|
||||
if override.HealthLua == "" {
|
||||
_, _ = fmt.Printf("Health script is not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
|
||||
return
|
||||
}
|
||||
|
||||
resHealth, err := health.GetResourceHealth(&res, overrides)
|
||||
errors.CheckError(err)
|
||||
|
||||
_, _ = fmt.Printf("STATUS: %s\n", resHealth.Status)
|
||||
_, _ = fmt.Printf("MESSAGE: %s\n", resHealth.Message)
|
||||
})
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func NewResourceActionCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "action RESOURCE_YAML_PATH ACTION",
|
||||
Short: "Executes resource action",
|
||||
Long: "Executes resource action using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields",
|
||||
Example: `
|
||||
argocd-util settings resource-overrides action /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) < 2 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
action := args[1]
|
||||
|
||||
executeResourceOverrideCommand(cmdCtx, args, func(res unstructured.Unstructured, override v1alpha1.ResourceOverride, overrides map[string]v1alpha1.ResourceOverride) {
|
||||
gvk := res.GroupVersionKind()
|
||||
if override.Actions == "" {
|
||||
_, _ = fmt.Printf("Actions are not configured for '%s/%s'\n", gvk.Group, gvk.Kind)
|
||||
return
|
||||
}
|
||||
|
||||
luaVM := lua.VM{ResourceOverrides: overrides}
|
||||
action, err := luaVM.GetResourceAction(&res, action)
|
||||
errors.CheckError(err)
|
||||
|
||||
modifiedRes, err := luaVM.ExecuteResourceAction(&res, action.ActionLua)
|
||||
errors.CheckError(err)
|
||||
|
||||
if reflect.DeepEqual(&res, modifiedRes) {
|
||||
_, _ = fmt.Printf("No fields had been changed by action: \n%s\n", action.Name)
|
||||
return
|
||||
}
|
||||
|
||||
_, _ = fmt.Printf("Following fields have been changed:\n\n")
|
||||
_ = diff.PrintDiff(res.GetName(), &res, modifiedRes)
|
||||
})
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
|
||||
367
cmd/argocd-util/commands/settings_test.go
Normal file
367
cmd/argocd-util/commands/settings_test.go
Normal file
@@ -0,0 +1,367 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
"github.com/argoproj/argo-cd/util"
|
||||
"github.com/argoproj/argo-cd/util/settings"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func captureStdout(callback func()) (string, error) {
|
||||
oldStdout := os.Stdout
|
||||
oldStderr := os.Stderr
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
os.Stdout = w
|
||||
defer func() {
|
||||
os.Stdout = oldStdout
|
||||
os.Stderr = oldStderr
|
||||
}()
|
||||
|
||||
callback()
|
||||
util.Close(w)
|
||||
|
||||
data, err := ioutil.ReadAll(r)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func newSettingsManager(data map[string]string) *settings.SettingsManager {
|
||||
clientset := fake.NewSimpleClientset(&v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: common.ArgoCDConfigMapName,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
Data: data,
|
||||
}, &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: common.ArgoCDSecretName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"admin.password": []byte("test"),
|
||||
"server.secretkey": []byte("test"),
|
||||
},
|
||||
})
|
||||
return settings.NewSettingsManager(context.Background(), clientset, "default")
|
||||
}
|
||||
|
||||
type fakeCmdContext struct {
|
||||
mgr *settings.SettingsManager
|
||||
// nolint:unused,structcheck
|
||||
out bytes.Buffer
|
||||
}
|
||||
|
||||
func newCmdContext(data map[string]string) *fakeCmdContext {
|
||||
return &fakeCmdContext{mgr: newSettingsManager(data)}
|
||||
}
|
||||
|
||||
func (ctx *fakeCmdContext) createSettingsManager() (*settings.SettingsManager, error) {
|
||||
return ctx.mgr, nil
|
||||
}
|
||||
|
||||
type validatorTestCase struct {
|
||||
validator string
|
||||
data map[string]string
|
||||
containsSummary string
|
||||
containsError string
|
||||
}
|
||||
|
||||
func TestCreateSettingsManager(t *testing.T) {
|
||||
f, closer, err := tempFile(`apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
data:
|
||||
url: https://myargocd.com`)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
defer util.Close(closer)
|
||||
|
||||
opts := settingsOpts{argocdCMPath: f}
|
||||
settingsManager, err := opts.createSettingsManager()
|
||||
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
|
||||
argoCDSettings, err := settingsManager.GetSettings()
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, "https://myargocd.com", argoCDSettings.URL)
|
||||
}
|
||||
|
||||
func TestValidator(t *testing.T) {
|
||||
testCases := map[string]validatorTestCase{
|
||||
"General_SSOIsNotConfigured": {
|
||||
validator: "general", containsSummary: "SSO is not configured",
|
||||
},
|
||||
"General_DexInvalidConfig": {
|
||||
validator: "general",
|
||||
data: map[string]string{"dex.config": "abcdefg"},
|
||||
containsError: "invalid dex.config",
|
||||
},
|
||||
"General_OIDCConfigured": {
|
||||
validator: "general",
|
||||
data: map[string]string{
|
||||
"url": "https://myargocd.com",
|
||||
"oidc.config": `
|
||||
name: Okta
|
||||
issuer: https://dev-123456.oktapreview.com
|
||||
clientID: aaaabbbbccccddddeee
|
||||
clientSecret: aaaabbbbccccddddeee`,
|
||||
},
|
||||
containsSummary: "OIDC is configured",
|
||||
},
|
||||
"General_DexConfiguredMissingURL": {
|
||||
validator: "general",
|
||||
data: map[string]string{
|
||||
"dex.config": `connectors:
|
||||
- type: github
|
||||
name: GitHub
|
||||
config:
|
||||
clientID: aabbccddeeff00112233
|
||||
clientSecret: aabbccddeeff00112233`,
|
||||
},
|
||||
containsSummary: "Dex is configured ('url' field is missing)",
|
||||
},
|
||||
"Plugins_ValidConfig": {
|
||||
validator: "plugins",
|
||||
data: map[string]string{
|
||||
"configManagementPlugins": `[{"name": "test1"}, {"name": "test2"}]`,
|
||||
},
|
||||
containsSummary: "2 plugins",
|
||||
},
|
||||
"Kustomize_ModifiedOptions": {
|
||||
validator: "kustomize",
|
||||
containsSummary: "default options",
|
||||
},
|
||||
"Kustomize_DefaultOptions": {
|
||||
validator: "kustomize",
|
||||
data: map[string]string{
|
||||
"kustomize.buildOptions": "updated-options (2 versions)",
|
||||
"kustomize.versions.v123": "binary-123",
|
||||
"kustomize.versions.v321": "binary-321",
|
||||
},
|
||||
containsSummary: "updated-options",
|
||||
},
|
||||
"Repositories": {
|
||||
validator: "repositories",
|
||||
data: map[string]string{
|
||||
"repositories": `
|
||||
- url: https://github.com/argoproj/my-private-repository1
|
||||
- url: https://github.com/argoproj/my-private-repository2`,
|
||||
},
|
||||
containsSummary: "2 repositories",
|
||||
},
|
||||
"Accounts": {
|
||||
validator: "accounts",
|
||||
data: map[string]string{
|
||||
"accounts.user1": "apiKey, login",
|
||||
"accounts.user2": "login",
|
||||
"accounts.user3": "apiKey",
|
||||
},
|
||||
containsSummary: "4 accounts",
|
||||
},
|
||||
"ResourceOverrides": {
|
||||
validator: "resource-overrides",
|
||||
data: map[string]string{
|
||||
"resource.customizations": `
|
||||
admissionregistration.k8s.io/MutatingWebhookConfiguration:
|
||||
ignoreDifferences: |
|
||||
jsonPointers:
|
||||
- /webhooks/0/clientConfig/caBundle`,
|
||||
},
|
||||
containsSummary: "1 resource overrides",
|
||||
},
|
||||
}
|
||||
for name := range testCases {
|
||||
tc := testCases[name]
|
||||
t.Run(name, func(t *testing.T) {
|
||||
validator, ok := validatorsByGroup[tc.validator]
|
||||
if !assert.True(t, ok) {
|
||||
return
|
||||
}
|
||||
summary, err := validator(newSettingsManager(tc.data))
|
||||
if tc.containsSummary != "" {
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, summary, tc.containsSummary)
|
||||
} else if tc.containsError != "" {
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), tc.containsError)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
testDeploymentYAML = `apiVersion: v1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
replicas: 0`
|
||||
)
|
||||
|
||||
func tempFile(content string) (string, io.Closer, error) {
|
||||
f, err := ioutil.TempFile("", "*.yaml")
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
_, err = f.Write([]byte(content))
|
||||
if err != nil {
|
||||
_ = os.Remove(f.Name())
|
||||
return "", nil, err
|
||||
}
|
||||
return f.Name(), util.NewCloser(func() error {
|
||||
return os.Remove(f.Name())
|
||||
}), nil
|
||||
}
|
||||
|
||||
func TestValidateSettingsCommand_NoErrors(t *testing.T) {
|
||||
cmd := NewValidateSettingsCommand(newCmdContext(map[string]string{}))
|
||||
out, err := captureStdout(func() {
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
for k := range validatorsByGroup {
|
||||
assert.Contains(t, out, fmt.Sprintf("✅ %s", k))
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceOverrideIgnoreDifferences(t *testing.T) {
|
||||
f, closer, err := tempFile(testDeploymentYAML)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
defer util.Close(closer)
|
||||
|
||||
t.Run("NoOverridesConfigured", func(t *testing.T) {
|
||||
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{}))
|
||||
out, err := captureStdout(func() {
|
||||
cmd.SetArgs([]string{"ignore-differences", f})
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "No overrides configured")
|
||||
})
|
||||
|
||||
t.Run("DataIgnored", func(t *testing.T) {
|
||||
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
|
||||
"resource.customizations": `apps/Deployment:
|
||||
ignoreDifferences: |
|
||||
jsonPointers:
|
||||
- /spec`}))
|
||||
out, err := captureStdout(func() {
|
||||
cmd.SetArgs([]string{"ignore-differences", f})
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "< spec:")
|
||||
})
|
||||
}
|
||||
|
||||
func TestResourceOverrideHealth(t *testing.T) {
|
||||
f, closer, err := tempFile(testDeploymentYAML)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
defer util.Close(closer)
|
||||
|
||||
t.Run("NoHealthAssessment", func(t *testing.T) {
|
||||
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
|
||||
"resource.customizations": `apps/Deployment: {}`}))
|
||||
out, err := captureStdout(func() {
|
||||
cmd.SetArgs([]string{"health", f})
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "Health script is not configured")
|
||||
})
|
||||
|
||||
t.Run("HealthAssessmentConfigured", func(t *testing.T) {
|
||||
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
|
||||
"resource.customizations": `apps/Deployment:
|
||||
health.lua: |
|
||||
return { status = "Progressing" }
|
||||
`}))
|
||||
out, err := captureStdout(func() {
|
||||
cmd.SetArgs([]string{"health", f})
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "Progressing")
|
||||
})
|
||||
}
|
||||
|
||||
func TestResourceOverrideAction(t *testing.T) {
|
||||
f, closer, err := tempFile(testDeploymentYAML)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
defer util.Close(closer)
|
||||
|
||||
t.Run("NoActions", func(t *testing.T) {
|
||||
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
|
||||
"resource.customizations": `apps/Deployment: {}`}))
|
||||
out, err := captureStdout(func() {
|
||||
cmd.SetArgs([]string{"action", f, "test"})
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "Actions are not configured")
|
||||
})
|
||||
|
||||
t.Run("HealthAssessmentConfigured", func(t *testing.T) {
|
||||
cmd := NewResourceOverridesCommand(newCmdContext(map[string]string{
|
||||
"resource.customizations": `apps/Deployment:
|
||||
actions: |
|
||||
definitions:
|
||||
- name: test
|
||||
action.lua: |
|
||||
obj.metadata.labels["test"] = 'updated'
|
||||
return obj
|
||||
`}))
|
||||
out, err := captureStdout(func() {
|
||||
cmd.SetArgs([]string{"action", f, "test"})
|
||||
err := cmd.Execute()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "test: updated")
|
||||
})
|
||||
}
|
||||
@@ -24,9 +24,9 @@ import (
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/cmd/argocd-util/commands"
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
"github.com/argoproj/argo-cd/errors"
|
||||
"github.com/argoproj/argo-cd/util"
|
||||
"github.com/argoproj/argo-cd/util/cli"
|
||||
"github.com/argoproj/argo-cd/util/db"
|
||||
"github.com/argoproj/argo-cd/util/dex"
|
||||
@@ -73,7 +73,8 @@ func NewCommand() *cobra.Command {
|
||||
command.AddCommand(NewImportCommand())
|
||||
command.AddCommand(NewExportCommand())
|
||||
command.AddCommand(NewClusterConfig())
|
||||
command.AddCommand(NewProjectsCommand())
|
||||
command.AddCommand(commands.NewProjectsCommand())
|
||||
command.AddCommand(commands.NewSettingsCommand())
|
||||
|
||||
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
return command
|
||||
@@ -109,7 +110,7 @@ func NewRunDexCommand() *cobra.Command {
|
||||
} else {
|
||||
err = ioutil.WriteFile("/tmp/dex.yaml", dexCfgBytes, 0644)
|
||||
errors.CheckError(err)
|
||||
log.Info(redactor(string(dexCfgBytes)))
|
||||
log.Debug(redactor(string(dexCfgBytes)))
|
||||
cmd = exec.Command("dex", "serve", "/tmp/dex.yaml")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
@@ -385,8 +386,14 @@ func NewExportCommand() *cobra.Command {
|
||||
} else {
|
||||
f, err := os.Create(out)
|
||||
errors.CheckError(err)
|
||||
defer util.Close(f)
|
||||
writer = bufio.NewWriter(f)
|
||||
bw := bufio.NewWriter(f)
|
||||
writer = bw
|
||||
defer func() {
|
||||
err = bw.Flush()
|
||||
errors.CheckError(err)
|
||||
err = f.Close()
|
||||
errors.CheckError(err)
|
||||
}()
|
||||
}
|
||||
|
||||
acdClients := newArgoCDClientsets(config, namespace)
|
||||
@@ -540,10 +547,21 @@ func specsEqual(left, right unstructured.Unstructured) bool {
|
||||
leftData, _, _ := unstructured.NestedMap(left.Object, "data")
|
||||
rightData, _, _ := unstructured.NestedMap(right.Object, "data")
|
||||
return reflect.DeepEqual(leftData, rightData)
|
||||
case "AppProject", "Application":
|
||||
case "AppProject":
|
||||
leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec")
|
||||
rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec")
|
||||
return reflect.DeepEqual(leftSpec, rightSpec)
|
||||
case "Application":
|
||||
leftSpec, _, _ := unstructured.NestedMap(left.Object, "spec")
|
||||
rightSpec, _, _ := unstructured.NestedMap(right.Object, "spec")
|
||||
leftStatus, _, _ := unstructured.NestedMap(left.Object, "status")
|
||||
rightStatus, _, _ := unstructured.NestedMap(right.Object, "status")
|
||||
// reconciledAt and observedAt are constantly changing and we ignore any diff there
|
||||
delete(leftStatus, "reconciledAt")
|
||||
delete(rightStatus, "reconciledAt")
|
||||
delete(leftStatus, "observedAt")
|
||||
delete(rightStatus, "observedAt")
|
||||
return reflect.DeepEqual(leftSpec, rightSpec) && reflect.DeepEqual(leftStatus, rightStatus)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -558,8 +576,13 @@ func updateLive(bak, live *unstructured.Unstructured) *unstructured.Unstructured
|
||||
switch live.GetKind() {
|
||||
case "Secret", "ConfigMap":
|
||||
newLive.Object["data"] = bak.Object["data"]
|
||||
case "AppProject", "Application":
|
||||
case "AppProject":
|
||||
newLive.Object["spec"] = bak.Object["spec"]
|
||||
case "Application":
|
||||
newLive.Object["spec"] = bak.Object["spec"]
|
||||
if _, ok := bak.Object["status"]; ok {
|
||||
newLive.Object["status"] = bak.Object["status"]
|
||||
}
|
||||
}
|
||||
return newLive
|
||||
}
|
||||
@@ -611,7 +634,7 @@ func NewClusterConfig() *cobra.Command {
|
||||
|
||||
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(context.Background(), kubeclientset, namespace), kubeclientset).GetCluster(context.Background(), serverUrl)
|
||||
errors.CheckError(err)
|
||||
err = kube.WriteKubeConfig(cluster.RESTConfig(), namespace, output)
|
||||
err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output)
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
@@ -640,7 +663,7 @@ func redactor(dirtyString string) string {
|
||||
err := yaml.Unmarshal([]byte(dirtyString), &config)
|
||||
errors.CheckError(err)
|
||||
iterateStringFields(config, func(name string, val string) string {
|
||||
if name == "clientSecret" || name == "secret" {
|
||||
if name == "clientSecret" || name == "secret" || name == "bindPW" {
|
||||
return "********"
|
||||
} else {
|
||||
return val
|
||||
|
||||
@@ -18,8 +18,17 @@ connectors:
|
||||
id: github
|
||||
name: GitHub
|
||||
type: github
|
||||
- config:
|
||||
bindDN: uid=serviceaccount,cn=users,dc=example,dc=com
|
||||
bindPW: theSecret
|
||||
host: ldap.example.com:636
|
||||
id: ldap
|
||||
name: LDAP
|
||||
type: ldap
|
||||
grpc:
|
||||
addr: 0.0.0.0:5557
|
||||
telemetry:
|
||||
http: 0.0.0.0:5558
|
||||
issuer: https://argocd.example.com/api/dex
|
||||
oauth2:
|
||||
skipApprovalScreen: true
|
||||
@@ -49,6 +58,13 @@ var expectedRedaction = `connectors:
|
||||
id: github
|
||||
name: GitHub
|
||||
type: github
|
||||
- config:
|
||||
bindDN: uid=serviceaccount,cn=users,dc=example,dc=com
|
||||
bindPW: '********'
|
||||
host: ldap.example.com:636
|
||||
id: ldap
|
||||
name: LDAP
|
||||
type: ldap
|
||||
grpc:
|
||||
addr: 0.0.0.0:5557
|
||||
issuer: https://argocd.example.com/api/dex
|
||||
@@ -67,6 +83,8 @@ staticClients:
|
||||
- http://localhost
|
||||
storage:
|
||||
type: memory
|
||||
telemetry:
|
||||
http: 0.0.0.0:5558
|
||||
web:
|
||||
http: 0.0.0.0:5556
|
||||
`
|
||||
|
||||
@@ -5,9 +5,12 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
timeutil "github.com/argoproj/pkg/time"
|
||||
"github.com/ghodss/yaml"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -21,6 +24,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/util"
|
||||
"github.com/argoproj/argo-cd/util/cli"
|
||||
"github.com/argoproj/argo-cd/util/localconfig"
|
||||
sessionutil "github.com/argoproj/argo-cd/util/session"
|
||||
)
|
||||
|
||||
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
@@ -35,11 +39,16 @@ func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command.AddCommand(NewAccountUpdatePasswordCommand(clientOpts))
|
||||
command.AddCommand(NewAccountGetUserInfoCommand(clientOpts))
|
||||
command.AddCommand(NewAccountCanICommand(clientOpts))
|
||||
command.AddCommand(NewAccountListCommand(clientOpts))
|
||||
command.AddCommand(NewAccountGenerateTokenCommand(clientOpts))
|
||||
command.AddCommand(NewAccountGetCommand(clientOpts))
|
||||
command.AddCommand(NewAccountDeleteTokenCommand(clientOpts))
|
||||
return command
|
||||
}
|
||||
|
||||
func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
account string
|
||||
currentPassword string
|
||||
newPassword string
|
||||
)
|
||||
@@ -51,14 +60,20 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
acdClient := argocdclient.NewClientOrDie(clientOpts)
|
||||
conn, usrIf := acdClient.NewAccountClientOrDie()
|
||||
defer util.Close(conn)
|
||||
|
||||
if currentPassword == "" {
|
||||
userInfo := getCurrentAccount(acdClient)
|
||||
|
||||
if userInfo.Iss == sessionutil.SessionManagerClaimsIssuer && currentPassword == "" {
|
||||
fmt.Print("*** Enter current password: ")
|
||||
password, err := terminal.ReadPassword(syscall.Stdin)
|
||||
password, err := terminal.ReadPassword(int(os.Stdin.Fd()))
|
||||
errors.CheckError(err)
|
||||
currentPassword = string(password)
|
||||
fmt.Print("\n")
|
||||
}
|
||||
|
||||
if newPassword == "" {
|
||||
var err error
|
||||
newPassword, err = cli.ReadAndConfirmPassword()
|
||||
@@ -68,37 +83,37 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
|
||||
updatePasswordRequest := accountpkg.UpdatePasswordRequest{
|
||||
NewPassword: newPassword,
|
||||
CurrentPassword: currentPassword,
|
||||
Name: account,
|
||||
}
|
||||
|
||||
acdClient := argocdclient.NewClientOrDie(clientOpts)
|
||||
conn, usrIf := acdClient.NewAccountClientOrDie()
|
||||
defer util.Close(conn)
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := usrIf.UpdatePassword(ctx, &updatePasswordRequest)
|
||||
errors.CheckError(err)
|
||||
fmt.Printf("Password updated\n")
|
||||
|
||||
// Get a new JWT token after updating the password
|
||||
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
|
||||
errors.CheckError(err)
|
||||
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
|
||||
errors.CheckError(err)
|
||||
claims, err := configCtx.User.Claims()
|
||||
errors.CheckError(err)
|
||||
tokenString := passwordLogin(acdClient, claims.Subject, newPassword)
|
||||
localCfg.UpsertUser(localconfig.User{
|
||||
Name: localCfg.CurrentContext,
|
||||
AuthToken: tokenString,
|
||||
})
|
||||
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
|
||||
errors.CheckError(err)
|
||||
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
|
||||
if account == "" || account == userInfo.Username {
|
||||
// Get a new JWT token after updating the password
|
||||
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
|
||||
errors.CheckError(err)
|
||||
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
|
||||
errors.CheckError(err)
|
||||
claims, err := configCtx.User.Claims()
|
||||
errors.CheckError(err)
|
||||
tokenString := passwordLogin(acdClient, claims.Subject, newPassword)
|
||||
localCfg.UpsertUser(localconfig.User{
|
||||
Name: localCfg.CurrentContext,
|
||||
AuthToken: tokenString,
|
||||
})
|
||||
err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
|
||||
errors.CheckError(err)
|
||||
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
command.Flags().StringVar(¤tPassword, "current-password", "", "current password you wish to change")
|
||||
command.Flags().StringVar(&newPassword, "new-password", "", "new password you want to update to")
|
||||
command.Flags().StringVar(&account, "account", "", "an account name that should be updated. Defaults to current user account")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -159,7 +174,7 @@ argocd account can-i sync applications '*'
|
||||
argocd account can-i update projects 'default'
|
||||
|
||||
# Can I create a cluster?
|
||||
argocd account can-i create cluster '*'
|
||||
argocd account can-i create clusters '*'
|
||||
|
||||
Actions: %v
|
||||
Resources: %v
|
||||
@@ -184,3 +199,202 @@ Resources: %v
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func printAccountNames(accounts []*accountpkg.Account) {
|
||||
for _, p := range accounts {
|
||||
fmt.Println(p.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func printAccountsTable(items []*accountpkg.Account) {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "NAME\tENABLED\tCAPABILITIES\n")
|
||||
for _, a := range items {
|
||||
fmt.Fprintf(w, "%s\t%v\t%s\n", a.Name, a.Enabled, strings.Join(a.Capabilities, ", "))
|
||||
}
|
||||
_ = w.Flush()
|
||||
}
|
||||
|
||||
func NewAccountListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
output string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List accounts",
|
||||
Example: "argocd account list",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
|
||||
conn, client := argocdclient.NewClientOrDie(clientOpts).NewAccountClientOrDie()
|
||||
defer util.Close(conn)
|
||||
|
||||
ctx := context.Background()
|
||||
response, err := client.ListAccounts(ctx, &accountpkg.ListAccountRequest{})
|
||||
|
||||
errors.CheckError(err)
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
err := PrintResourceList(response.Items, output, false)
|
||||
errors.CheckError(err)
|
||||
case "name":
|
||||
printAccountNames(response.Items)
|
||||
case "wide", "":
|
||||
printAccountsTable(response.Items)
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
}
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|name")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func getCurrentAccount(clientset argocdclient.Client) session.GetUserInfoResponse {
|
||||
conn, client := clientset.NewSessionClientOrDie()
|
||||
defer util.Close(conn)
|
||||
userInfo, err := client.GetUserInfo(context.Background(), &session.GetUserInfoRequest{})
|
||||
errors.CheckError(err)
|
||||
return *userInfo
|
||||
}
|
||||
|
||||
func NewAccountGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
output string
|
||||
account string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Get account details",
|
||||
Example: `# Get the currently logged in account details
|
||||
argocd account get
|
||||
|
||||
# Get details for an account by name
|
||||
argocd account get --account <account-name>`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
clientset := argocdclient.NewClientOrDie(clientOpts)
|
||||
|
||||
if account == "" {
|
||||
account = getCurrentAccount(clientset).Username
|
||||
}
|
||||
|
||||
conn, client := clientset.NewAccountClientOrDie()
|
||||
defer util.Close(conn)
|
||||
|
||||
acc, err := client.GetAccount(context.Background(), &accountpkg.GetAccountRequest{Name: account})
|
||||
|
||||
errors.CheckError(err)
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
err := PrintResourceList(acc, output, true)
|
||||
errors.CheckError(err)
|
||||
case "name":
|
||||
fmt.Println(acc.Name)
|
||||
case "wide", "":
|
||||
printAccountDetails(acc)
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
}
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|name")
|
||||
cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func printAccountDetails(acc *accountpkg.Account) {
|
||||
fmt.Printf(printOpFmtStr, "Name:", acc.Name)
|
||||
fmt.Printf(printOpFmtStr, "Enabled:", strconv.FormatBool(acc.Enabled))
|
||||
fmt.Printf(printOpFmtStr, "Capabilities:", strings.Join(acc.Capabilities, ", "))
|
||||
fmt.Println("\nTokens:")
|
||||
if len(acc.Tokens) == 0 {
|
||||
fmt.Println("NONE")
|
||||
} else {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "ID\tISSUED AT\tEXPIRING AT\n")
|
||||
for _, t := range acc.Tokens {
|
||||
expiresAtFormatted := "never"
|
||||
if t.ExpiresAt > 0 {
|
||||
expiresAt := time.Unix(t.ExpiresAt, 0)
|
||||
expiresAtFormatted = expiresAt.Format(time.RFC3339)
|
||||
if expiresAt.Before(time.Now()) {
|
||||
expiresAtFormatted = fmt.Sprintf("%s (expired)", expiresAtFormatted)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", t.Id, time.Unix(t.IssuedAt, 0).Format(time.RFC3339), expiresAtFormatted)
|
||||
}
|
||||
_ = w.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func NewAccountGenerateTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
account string
|
||||
expiresIn string
|
||||
id string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "generate-token",
|
||||
Short: "Generate account token",
|
||||
Example: `# Generate token for the currently logged in account
|
||||
argocd account generate-token
|
||||
|
||||
# Generate token for the account with the specified name
|
||||
argocd account generate-token --account <account-name>`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
|
||||
clientset := argocdclient.NewClientOrDie(clientOpts)
|
||||
conn, client := clientset.NewAccountClientOrDie()
|
||||
defer util.Close(conn)
|
||||
if account == "" {
|
||||
account = getCurrentAccount(clientset).Username
|
||||
}
|
||||
expiresIn, err := timeutil.ParseDuration(expiresIn)
|
||||
errors.CheckError(err)
|
||||
response, err := client.CreateToken(context.Background(), &accountpkg.CreateTokenRequest{
|
||||
Name: account,
|
||||
ExpiresIn: int64(expiresIn.Seconds()),
|
||||
Id: id,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
fmt.Println(response.Token)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.")
|
||||
cmd.Flags().StringVarP(&expiresIn, "expires-in", "e", "0s", "Duration before the token will expire. (Default: No expiration)")
|
||||
cmd.Flags().StringVar(&id, "id", "", "Optional token id. Fallback to uuid if not value specified.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func NewAccountDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
account string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete-token",
|
||||
Short: "Deletes account token",
|
||||
Example: `# Delete token of the currently logged in account
|
||||
argocd account delete-token ID
|
||||
|
||||
# Delete token of the account with the specified name
|
||||
argocd account generate-token --account <account-name>`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
id := args[0]
|
||||
|
||||
clientset := argocdclient.NewClientOrDie(clientOpts)
|
||||
conn, client := clientset.NewAccountClientOrDie()
|
||||
defer util.Close(conn)
|
||||
if account == "" {
|
||||
account = getCurrentAccount(clientset).Username
|
||||
}
|
||||
_, err := client.DeleteToken(context.Background(), &accountpkg.DeleteTokenRequest{Name: account, Id: id})
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&account, "account", "a", "", "Account name. Defaults to the current account.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -496,6 +496,8 @@ func setAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSets: appOpts.helmSets})
|
||||
case "helm-set-string":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
case "helm-set-file":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
case "directory-recurse":
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
case "config-management-plugin":
|
||||
@@ -512,6 +514,8 @@ func setAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
case "kustomize-image":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
case "kustomize-version":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
case "jsonnet-tla-str":
|
||||
setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaStr, false)
|
||||
case "jsonnet-tla-code":
|
||||
@@ -523,14 +527,36 @@ func setAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
case "sync-policy":
|
||||
switch appOpts.syncPolicy {
|
||||
case "automated":
|
||||
spec.SyncPolicy = &argoappv1.SyncPolicy{
|
||||
Automated: &argoappv1.SyncPolicyAutomated{},
|
||||
if spec.SyncPolicy == nil {
|
||||
spec.SyncPolicy = &argoappv1.SyncPolicy{}
|
||||
}
|
||||
spec.SyncPolicy.Automated = &argoappv1.SyncPolicyAutomated{}
|
||||
case "none":
|
||||
spec.SyncPolicy = nil
|
||||
if spec.SyncPolicy != nil {
|
||||
spec.SyncPolicy.Automated = nil
|
||||
}
|
||||
if spec.SyncPolicy.IsZero() {
|
||||
spec.SyncPolicy = nil
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Invalid sync-policy: %s", appOpts.syncPolicy)
|
||||
}
|
||||
case "sync-option":
|
||||
if spec.SyncPolicy == nil {
|
||||
spec.SyncPolicy = &argoappv1.SyncPolicy{}
|
||||
}
|
||||
for _, option := range appOpts.syncOptions {
|
||||
// `!` means remove the option
|
||||
if strings.HasPrefix(option, "!") {
|
||||
option = strings.TrimPrefix(option, "!")
|
||||
spec.SyncPolicy.SyncOptions = spec.SyncPolicy.SyncOptions.RemoveOption(option)
|
||||
} else {
|
||||
spec.SyncPolicy.SyncOptions = spec.SyncPolicy.SyncOptions.AddOption(option)
|
||||
}
|
||||
}
|
||||
if spec.SyncPolicy.IsZero() {
|
||||
spec.SyncPolicy = nil
|
||||
}
|
||||
}
|
||||
})
|
||||
if flags.Changed("auto-prune") {
|
||||
@@ -565,12 +591,14 @@ type kustomizeOpts struct {
|
||||
namePrefix string
|
||||
nameSuffix string
|
||||
images []string
|
||||
version string
|
||||
}
|
||||
|
||||
func setKustomizeOpt(src *argoappv1.ApplicationSource, opts kustomizeOpts) {
|
||||
if src.Kustomize == nil {
|
||||
src.Kustomize = &argoappv1.ApplicationSourceKustomize{}
|
||||
}
|
||||
src.Kustomize.Version = opts.version
|
||||
src.Kustomize.NamePrefix = opts.namePrefix
|
||||
src.Kustomize.NameSuffix = opts.nameSuffix
|
||||
for _, image := range opts.images {
|
||||
@@ -586,6 +614,7 @@ type helmOpts struct {
|
||||
releaseName string
|
||||
helmSets []string
|
||||
helmSetStrings []string
|
||||
helmSetFiles []string
|
||||
}
|
||||
|
||||
func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) {
|
||||
@@ -612,6 +641,13 @@ func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) {
|
||||
}
|
||||
src.Helm.AddParameter(*p)
|
||||
}
|
||||
for _, text := range opts.helmSetFiles {
|
||||
p, err := argoappv1.NewHelmFileParameter(text)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
src.Helm.AddFileParameter(*p)
|
||||
}
|
||||
if src.Helm.IsZero() {
|
||||
src.Helm = nil
|
||||
}
|
||||
@@ -621,31 +657,8 @@ func setJsonnetOpt(src *argoappv1.ApplicationSource, tlaParameters []string, cod
|
||||
if src.Directory == nil {
|
||||
src.Directory = &argoappv1.ApplicationSourceDirectory{}
|
||||
}
|
||||
|
||||
if len(tlaParameters) != 0 {
|
||||
tlas := make([]argoappv1.JsonnetVar, len(tlaParameters))
|
||||
for index, paramStr := range tlaParameters {
|
||||
parts := strings.SplitN(paramStr, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
log.Fatalf("Expected parameter of the form: param=value. Received: %s", paramStr)
|
||||
break
|
||||
}
|
||||
tlas[index] = argoappv1.JsonnetVar{
|
||||
Name: parts[0],
|
||||
Value: parts[1],
|
||||
Code: code}
|
||||
}
|
||||
var existingTLAs []argoappv1.JsonnetVar
|
||||
for i := range src.Directory.Jsonnet.TLAs {
|
||||
if src.Directory.Jsonnet.TLAs[i].Code != code {
|
||||
existingTLAs = append(existingTLAs, src.Directory.Jsonnet.TLAs[i])
|
||||
}
|
||||
}
|
||||
src.Directory.Jsonnet.TLAs = append(existingTLAs, tlas...)
|
||||
}
|
||||
|
||||
if src.Directory.IsZero() {
|
||||
src.Directory = nil
|
||||
for _, j := range tlaParameters {
|
||||
src.Directory.Jsonnet.TLAs = append(src.Directory.Jsonnet.TLAs, argoappv1.NewJsonnetVar(j, code))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -672,8 +685,10 @@ type appOptions struct {
|
||||
releaseName string
|
||||
helmSets []string
|
||||
helmSetStrings []string
|
||||
helmSetFiles []string
|
||||
project string
|
||||
syncPolicy string
|
||||
syncOptions []string
|
||||
autoPrune bool
|
||||
selfHeal bool
|
||||
namePrefix string
|
||||
@@ -685,6 +700,7 @@ type appOptions struct {
|
||||
jsonnetExtVarStr []string
|
||||
jsonnetExtVarCode []string
|
||||
kustomizeImages []string
|
||||
kustomizeVersion string
|
||||
}
|
||||
|
||||
func addAppFlags(command *cobra.Command, opts *appOptions) {
|
||||
@@ -701,12 +717,15 @@ func addAppFlags(command *cobra.Command, opts *appOptions) {
|
||||
command.Flags().StringVar(&opts.releaseName, "release-name", "", "Helm release-name")
|
||||
command.Flags().StringArrayVar(&opts.helmSets, "helm-set", []string{}, "Helm set values on the command line (can be repeated to set several values: --helm-set key1=val1 --helm-set key2=val2)")
|
||||
command.Flags().StringArrayVar(&opts.helmSetStrings, "helm-set-string", []string{}, "Helm set STRING values on the command line (can be repeated to set several values: --helm-set-string key1=val1 --helm-set-string key2=val2)")
|
||||
command.Flags().StringArrayVar(&opts.helmSetFiles, "helm-set-file", []string{}, "Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2)")
|
||||
command.Flags().StringVar(&opts.project, "project", "", "Application project name")
|
||||
command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: automated, none)")
|
||||
command.Flags().StringArrayVar(&opts.syncOptions, "sync-option", []string{}, "Add or remove a sync options, e.g add `Prune=false`. Remove using `!` prefix, e.g. `!Prune=false`")
|
||||
command.Flags().BoolVar(&opts.autoPrune, "auto-prune", false, "Set automatic pruning when sync is automated")
|
||||
command.Flags().BoolVar(&opts.selfHeal, "self-heal", false, "Set self healing when sync is automated")
|
||||
command.Flags().StringVar(&opts.namePrefix, "nameprefix", "", "Kustomize nameprefix")
|
||||
command.Flags().StringVar(&opts.nameSuffix, "namesuffix", "", "Kustomize namesuffix")
|
||||
command.Flags().StringVar(&opts.nameSuffix, "kustomize-version", "", "Kustomize version")
|
||||
command.Flags().BoolVar(&opts.directoryRecurse, "directory-recurse", false, "Recurse directory")
|
||||
command.Flags().StringVar(&opts.configManagementPlugin, "config-management-plugin", "", "Config management plugin name")
|
||||
command.Flags().StringArrayVar(&opts.jsonnetTlaStr, "jsonnet-tla-str", []string{}, "Jsonnet top level string arguments")
|
||||
@@ -719,14 +738,27 @@ func addAppFlags(command *cobra.Command, opts *appOptions) {
|
||||
// NewApplicationUnsetCommand returns a new instance of an `argocd app unset` command
|
||||
func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
parameters []string
|
||||
valuesFiles []string
|
||||
parameters []string
|
||||
valuesFiles []string
|
||||
nameSuffix bool
|
||||
namePrefix bool
|
||||
kustomizeVersion bool
|
||||
kustomizeImages []string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "unset APPNAME -p COMPONENT=PARAM",
|
||||
Use: "unset APPNAME parameters",
|
||||
Short: "Unset application parameters",
|
||||
Example: ` # Unset kustomize override kustomize image
|
||||
argocd app unset my-app --kustomize-image=alpine
|
||||
|
||||
# Unset kustomize override prefix
|
||||
argocd app unset my-app --namesuffix
|
||||
|
||||
# Unset parameter override
|
||||
argocd app unset my-app -p COMPONENT=PARAM`,
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) != 1 || (len(parameters) == 0 && len(valuesFiles) == 0) {
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -737,7 +769,41 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
errors.CheckError(err)
|
||||
|
||||
updated := false
|
||||
if app.Spec.Source.Kustomize != nil {
|
||||
if namePrefix {
|
||||
updated = true
|
||||
app.Spec.Source.Kustomize.NamePrefix = ""
|
||||
}
|
||||
|
||||
if nameSuffix {
|
||||
updated = true
|
||||
app.Spec.Source.Kustomize.NameSuffix = ""
|
||||
}
|
||||
|
||||
if kustomizeVersion {
|
||||
updated = true
|
||||
app.Spec.Source.Kustomize.Version = ""
|
||||
}
|
||||
|
||||
for _, kustomizeImage := range kustomizeImages {
|
||||
for i, item := range app.Spec.Source.Kustomize.Images {
|
||||
if argoappv1.KustomizeImage(kustomizeImage).Match(item) {
|
||||
updated = true
|
||||
//remove i
|
||||
a := app.Spec.Source.Kustomize.Images
|
||||
copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index.
|
||||
a[len(a)-1] = "" // Erase last element (write zero value).
|
||||
a = a[:len(a)-1] // Truncate slice.
|
||||
app.Spec.Source.Kustomize.Images = a
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if app.Spec.Source.Ksonnet != nil {
|
||||
if len(parameters) == 0 && len(valuesFiles) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
for _, paramStr := range parameters {
|
||||
parts := strings.SplitN(paramStr, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
@@ -754,6 +820,10 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
}
|
||||
}
|
||||
if app.Spec.Source.Helm != nil {
|
||||
if len(parameters) == 0 && len(valuesFiles) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
for _, paramStr := range parameters {
|
||||
helmParams := app.Spec.Source.Helm.Parameters
|
||||
for i, p := range helmParams {
|
||||
@@ -789,6 +859,10 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
}
|
||||
command.Flags().StringArrayVarP(¶meters, "parameter", "p", []string{}, "unset a parameter override (e.g. -p guestbook=image)")
|
||||
command.Flags().StringArrayVar(&valuesFiles, "values", []string{}, "unset one or more helm values files")
|
||||
command.Flags().BoolVar(&nameSuffix, "namesuffix", false, "Kustomize namesuffix")
|
||||
command.Flags().BoolVar(&namePrefix, "nameprefix", false, "Kustomize nameprefix")
|
||||
command.Flags().BoolVar(&kustomizeVersion, "kustomize-version", false, "Kustomize version")
|
||||
command.Flags().StringArrayVar(&kustomizeImages, "kustomize-image", []string{}, "Kustomize images name (e.g. --kustomize-image node --kustomize-image mysql)")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -818,8 +892,9 @@ func liveObjects(resources []*argoappv1.ResourceDiff) ([]*unstructured.Unstructu
|
||||
return objs, nil
|
||||
}
|
||||
|
||||
func getLocalObjects(app *argoappv1.Application, local, appLabelKey, kubeVersion string, kustomizeOptions *argoappv1.KustomizeOptions) []*unstructured.Unstructured {
|
||||
manifestStrings := getLocalObjectsString(app, local, appLabelKey, kubeVersion, kustomizeOptions)
|
||||
func getLocalObjects(app *argoappv1.Application, local, appLabelKey, kubeVersion string, kustomizeOptions *argoappv1.KustomizeOptions,
|
||||
configManagementPlugins []*argoappv1.ConfigManagementPlugin) []*unstructured.Unstructured {
|
||||
manifestStrings := getLocalObjectsString(app, local, appLabelKey, kubeVersion, kustomizeOptions, configManagementPlugins)
|
||||
objs := make([]*unstructured.Unstructured, len(manifestStrings))
|
||||
for i := range manifestStrings {
|
||||
obj := unstructured.Unstructured{}
|
||||
@@ -830,7 +905,8 @@ func getLocalObjects(app *argoappv1.Application, local, appLabelKey, kubeVersion
|
||||
return objs
|
||||
}
|
||||
|
||||
func getLocalObjectsString(app *argoappv1.Application, local, appLabelKey, kubeVersion string, kustomizeOptions *argoappv1.KustomizeOptions) []string {
|
||||
func getLocalObjectsString(app *argoappv1.Application, local, appLabelKey, kubeVersion string, kustomizeOptions *argoappv1.KustomizeOptions,
|
||||
configManagementPlugins []*argoappv1.ConfigManagementPlugin) []string {
|
||||
res, err := repository.GenerateManifests(local, "/", app.Spec.Source.TargetRevision, &repoapiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{Repo: app.Spec.Source.RepoURL},
|
||||
AppLabelKey: appLabelKey,
|
||||
@@ -839,7 +915,8 @@ func getLocalObjectsString(app *argoappv1.Application, local, appLabelKey, kubeV
|
||||
ApplicationSource: &app.Spec.Source,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: kubeVersion,
|
||||
})
|
||||
Plugins: configManagementPlugins,
|
||||
}, true)
|
||||
errors.CheckError(err)
|
||||
|
||||
return res.Manifests
|
||||
@@ -919,7 +996,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
defer util.Close(conn)
|
||||
cluster, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Server: app.Spec.Destination.Server})
|
||||
errors.CheckError(err)
|
||||
localObjs := groupLocalObjs(getLocalObjects(app, local, argoSettings.AppLabelKey, cluster.ServerVersion, argoSettings.KustomizeOptions), liveObjs, app.Spec.Destination.Namespace)
|
||||
localObjs := groupLocalObjs(getLocalObjects(app, local, argoSettings.AppLabelKey, cluster.ServerVersion, argoSettings.KustomizeOptions, argoSettings.ConfigManagementPlugins), liveObjs, app.Spec.Destination.Namespace)
|
||||
for _, res := range resources.Items {
|
||||
var live = &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
|
||||
@@ -1004,9 +1081,9 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
var live *unstructured.Unstructured
|
||||
var target *unstructured.Unstructured
|
||||
if item.target != nil && item.live != nil {
|
||||
target = item.live
|
||||
live = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal(diffRes.PredictedLive, live)
|
||||
target = &unstructured.Unstructured{}
|
||||
live = item.live
|
||||
err = json.Unmarshal(diffRes.PredictedLive, target)
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
live = item.live
|
||||
@@ -1014,7 +1091,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
|
||||
foundDiffs = true
|
||||
_ = diff.PrintDiff(item.key.Name, target, live)
|
||||
_ = diff.PrintDiff(item.key.Name, live, target)
|
||||
}
|
||||
}
|
||||
if foundDiffs {
|
||||
@@ -1382,7 +1459,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
cluster, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Server: app.Spec.Destination.Server})
|
||||
errors.CheckError(err)
|
||||
util.Close(conn)
|
||||
localObjsStrings = getLocalObjectsString(app, local, argoSettings.AppLabelKey, cluster.ServerVersion, argoSettings.KustomizeOptions)
|
||||
localObjsStrings = getLocalObjectsString(app, local, argoSettings.AppLabelKey, cluster.ServerVersion, argoSettings.KustomizeOptions, argoSettings.ConfigManagementPlugins)
|
||||
}
|
||||
|
||||
syncReq := applicationpkg.ApplicationSyncRequest{
|
||||
@@ -1411,15 +1488,15 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
app, err := waitOnApplicationStatus(acdClient, appName, timeout, false, false, true, false, selectedResources)
|
||||
errors.CheckError(err)
|
||||
|
||||
// Only get resources to be pruned if sync was application-wide
|
||||
if len(selectedResources) == 0 {
|
||||
pruningRequired := app.Status.OperationState.SyncResult.Resources.PruningRequired()
|
||||
if pruningRequired > 0 {
|
||||
log.Fatalf("%d resources require pruning", pruningRequired)
|
||||
}
|
||||
|
||||
if !app.Status.OperationState.Phase.Successful() && !dryRun {
|
||||
os.Exit(1)
|
||||
if !dryRun {
|
||||
if !app.Status.OperationState.Phase.Successful() {
|
||||
log.Fatalf("Operation has completed with phase: %s", app.Status.OperationState.Phase)
|
||||
} else if len(selectedResources) == 0 && app.Status.Sync.Status != argoappv1.SyncStatusCodeSynced {
|
||||
// Only get resources to be pruned if sync was application-wide and final status is not synced
|
||||
pruningRequired := app.Status.OperationState.SyncResult.Resources.PruningRequired()
|
||||
if pruningRequired > 0 {
|
||||
log.Fatalf("%d resources require pruning", pruningRequired)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1575,7 +1652,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
|
||||
// time when the sync status lags behind when an operation completes
|
||||
refresh := false
|
||||
|
||||
printFinalStatus := func(app *argoappv1.Application) {
|
||||
printFinalStatus := func(app *argoappv1.Application) *argoappv1.Application {
|
||||
var err error
|
||||
if refresh {
|
||||
conn, appClient := acdClient.NewApplicationClientOrDie()
|
||||
@@ -1598,6 +1675,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
|
||||
printAppResources(w, app)
|
||||
_ = w.Flush()
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
if timeout != 0 {
|
||||
@@ -1618,8 +1696,21 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
|
||||
|
||||
for appEvent := range appEventCh {
|
||||
app = &appEvent.Application
|
||||
|
||||
operationInProgress := false
|
||||
// consider the operation is in progress
|
||||
if app.Operation != nil {
|
||||
// if it just got requested
|
||||
operationInProgress = true
|
||||
refresh = true
|
||||
} else if app.Status.OperationState != nil {
|
||||
if app.Status.OperationState.FinishedAt == nil {
|
||||
// if it is not finished yet
|
||||
operationInProgress = true
|
||||
} else if app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Before(app.Status.OperationState.FinishedAt) {
|
||||
// if it is just finished and we need to wait for controller to reconcile app once after syncing
|
||||
operationInProgress = true
|
||||
}
|
||||
}
|
||||
|
||||
var selectedResourcesAreReady bool
|
||||
@@ -1639,8 +1730,8 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
|
||||
selectedResourcesAreReady = checkResourceStatus(watchSync, watchHealth, watchOperation, watchSuspended, app.Status.Health.Status, string(app.Status.Sync.Status), appEvent.Application.Operation)
|
||||
}
|
||||
|
||||
if selectedResourcesAreReady {
|
||||
printFinalStatus(app)
|
||||
if selectedResourcesAreReady && !operationInProgress {
|
||||
app = printFinalStatus(app)
|
||||
return app, nil
|
||||
}
|
||||
|
||||
@@ -1650,7 +1741,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
|
||||
stateKey := newState.Key()
|
||||
if prevState, found := prevStates[stateKey]; found {
|
||||
if watchHealth && prevState.Health != argoappv1.HealthStatusUnknown && prevState.Health != argoappv1.HealthStatusDegraded && newState.Health == argoappv1.HealthStatusDegraded {
|
||||
printFinalStatus(app)
|
||||
_ = printFinalStatus(app)
|
||||
return nil, fmt.Errorf("application '%s' health state has transitioned from %s to %s", appName, prevState.Health, newState.Health)
|
||||
}
|
||||
doPrint = prevState.Merge(newState)
|
||||
@@ -1664,7 +1755,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
|
||||
}
|
||||
_ = w.Flush()
|
||||
}
|
||||
printFinalStatus(app)
|
||||
_ = printFinalStatus(app)
|
||||
return nil, fmt.Errorf("timed out (%ds) waiting for app %q match desired state", timeout, appName)
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package commands
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
@@ -34,4 +35,70 @@ func Test_setHelmOpt(t *testing.T) {
|
||||
setHelmOpt(&src, helmOpts{helmSetStrings: []string{"foo=bar"}})
|
||||
assert.Equal(t, []v1alpha1.HelmParameter{{Name: "foo", Value: "bar", ForceString: true}}, src.Helm.Parameters)
|
||||
})
|
||||
t.Run("HelmSetFiles", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setHelmOpt(&src, helmOpts{helmSetFiles: []string{"foo=bar"}})
|
||||
assert.Equal(t, []v1alpha1.HelmFileParameter{{Name: "foo", Path: "bar"}}, src.Helm.FileParameters)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setJsonnetOpt(t *testing.T) {
|
||||
t.Run("TlaSets", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setJsonnetOpt(&src, []string{"foo=bar"}, false)
|
||||
assert.Equal(t, []v1alpha1.JsonnetVar{{Name: "foo", Value: "bar"}}, src.Directory.Jsonnet.TLAs)
|
||||
setJsonnetOpt(&src, []string{"bar=baz"}, false)
|
||||
assert.Equal(t, []v1alpha1.JsonnetVar{{Name: "foo", Value: "bar"}, {Name: "bar", Value: "baz"}}, src.Directory.Jsonnet.TLAs)
|
||||
})
|
||||
t.Run("ExtSets", func(t *testing.T) {
|
||||
src := v1alpha1.ApplicationSource{}
|
||||
setJsonnetOptExtVar(&src, []string{"foo=bar"}, false)
|
||||
assert.Equal(t, []v1alpha1.JsonnetVar{{Name: "foo", Value: "bar"}}, src.Directory.Jsonnet.ExtVars)
|
||||
setJsonnetOptExtVar(&src, []string{"bar=baz"}, false)
|
||||
assert.Equal(t, []v1alpha1.JsonnetVar{{Name: "foo", Value: "bar"}, {Name: "bar", Value: "baz"}}, src.Directory.Jsonnet.ExtVars)
|
||||
})
|
||||
}
|
||||
|
||||
type appOptionsFixture struct {
|
||||
spec *v1alpha1.ApplicationSpec
|
||||
command *cobra.Command
|
||||
options *appOptions
|
||||
}
|
||||
|
||||
func (f *appOptionsFixture) SetFlag(key, value string) error {
|
||||
err := f.command.Flags().Set(key, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = setAppSpecOptions(f.command.Flags(), f.spec, f.options)
|
||||
return err
|
||||
}
|
||||
|
||||
func newAppOptionsFixture() *appOptionsFixture {
|
||||
fixture := &appOptionsFixture{
|
||||
spec: &v1alpha1.ApplicationSpec{},
|
||||
command: &cobra.Command{},
|
||||
options: &appOptions{},
|
||||
}
|
||||
addAppFlags(fixture.command, fixture.options)
|
||||
return fixture
|
||||
}
|
||||
|
||||
func Test_setAppSpecOptions(t *testing.T) {
|
||||
f := newAppOptionsFixture()
|
||||
t.Run("SyncPolicy", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlag("sync-policy", "automated"))
|
||||
assert.NotNil(t, f.spec.SyncPolicy.Automated)
|
||||
|
||||
assert.NoError(t, f.SetFlag("sync-policy", "none"))
|
||||
assert.Nil(t, f.spec.SyncPolicy)
|
||||
})
|
||||
t.Run("SyncOptions", func(t *testing.T) {
|
||||
assert.NoError(t, f.SetFlag("sync-option", "a=1"))
|
||||
assert.True(t, f.spec.SyncPolicy.SyncOptions.HasOption("a=1"))
|
||||
|
||||
// remove the options using !
|
||||
assert.NoError(t, f.SetFlag("sync-option", "!a=1"))
|
||||
assert.Nil(t, f.spec.SyncPolicy)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -174,18 +174,20 @@ func NewCertAddSSHCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
}
|
||||
|
||||
for _, knownHostsEntry := range sshKnownHostsLists {
|
||||
hostname, certSubType, certData, err := certutil.TokenizeSSHKnownHostsEntry(knownHostsEntry)
|
||||
_, certSubType, certData, err := certutil.TokenizeSSHKnownHostsEntry(knownHostsEntry)
|
||||
errors.CheckError(err)
|
||||
_, _, err = certutil.KnownHostsLineToPublicKey(knownHostsEntry)
|
||||
hostnameList, _, err := certutil.KnownHostsLineToPublicKey(knownHostsEntry)
|
||||
errors.CheckError(err)
|
||||
certificate := appsv1.RepositoryCertificate{
|
||||
ServerName: hostname,
|
||||
CertType: "ssh",
|
||||
CertSubType: certSubType,
|
||||
CertData: certData,
|
||||
// Each key could be valid for multiple hostnames
|
||||
for _, hostname := range hostnameList {
|
||||
certificate := appsv1.RepositoryCertificate{
|
||||
ServerName: hostname,
|
||||
CertType: "ssh",
|
||||
CertSubType: certSubType,
|
||||
CertData: certData,
|
||||
}
|
||||
certificates = append(certificates, certificate)
|
||||
}
|
||||
|
||||
certificates = append(certificates, certificate)
|
||||
}
|
||||
|
||||
certList := &appsv1.RepositoryCertificateList{Items: certificates}
|
||||
|
||||
@@ -60,6 +60,7 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
var (
|
||||
inCluster bool
|
||||
upsert bool
|
||||
serviceAccount string
|
||||
awsRoleArn string
|
||||
awsClusterName string
|
||||
systemNamespace string
|
||||
@@ -101,7 +102,11 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
// Install RBAC resources for managing the cluster
|
||||
clientset, err := kubernetes.NewForConfig(conf)
|
||||
errors.CheckError(err)
|
||||
managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, systemNamespace, namespaces)
|
||||
if serviceAccount != "" {
|
||||
managerBearerToken, err = clusterauth.GetServiceAccountBearerToken(clientset, systemNamespace, serviceAccount)
|
||||
} else {
|
||||
managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, systemNamespace, namespaces)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
}
|
||||
conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
|
||||
@@ -122,7 +127,8 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
|
||||
command.PersistentFlags().StringVar(&pathOpts.LoadingRules.ExplicitPath, pathOpts.ExplicitFileFlag, pathOpts.LoadingRules.ExplicitPath, "use a particular kubeconfig file")
|
||||
command.Flags().BoolVar(&inCluster, "in-cluster", false, "Indicates Argo CD resides inside this cluster and should connect using the internal k8s hostname (kubernetes.default.svc)")
|
||||
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
|
||||
command.Flags().StringVar(&awsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws-iam-authenticator will be used to access cluster")
|
||||
command.Flags().StringVar(&serviceAccount, "service-account", "", fmt.Sprintf("System namespace service account to use for kubernetes resource management. If not set then default \"%s\" SA will be created", clusterauth.ArgoCDManagerServiceAccount))
|
||||
command.Flags().StringVar(&awsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws cli eks token command will be used to access cluster")
|
||||
command.Flags().StringVar(&awsRoleArn, "aws-role-arn", "", "Optional AWS role arn. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.")
|
||||
command.Flags().StringVar(&systemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace")
|
||||
command.Flags().StringArrayVar(&namespaces, "namespace", nil, "List of namespaces which are allowed to manage")
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc"
|
||||
@@ -64,11 +65,12 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
}
|
||||
}
|
||||
clientOpts := argocdclient.ClientOptions{
|
||||
ConfigPath: "",
|
||||
ServerAddr: server,
|
||||
Insecure: globalClientOpts.Insecure,
|
||||
PlainText: globalClientOpts.PlainText,
|
||||
GRPCWeb: globalClientOpts.GRPCWeb,
|
||||
ConfigPath: "",
|
||||
ServerAddr: server,
|
||||
Insecure: globalClientOpts.Insecure,
|
||||
PlainText: globalClientOpts.PlainText,
|
||||
GRPCWeb: globalClientOpts.GRPCWeb,
|
||||
GRPCWebRootPath: globalClientOpts.GRPCWebRootPath,
|
||||
}
|
||||
acdClient := argocdclient.NewClientOrDie(&clientOpts)
|
||||
setConn, setIf := acdClient.NewSettingsClientOrDie()
|
||||
@@ -76,6 +78,10 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
|
||||
if ctxName == "" {
|
||||
ctxName = server
|
||||
if globalClientOpts.GRPCWebRootPath != "" {
|
||||
rootPath := strings.TrimRight(strings.TrimLeft(globalClientOpts.GRPCWebRootPath, "/"), "/")
|
||||
ctxName = fmt.Sprintf("%s/%s", server, rootPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the login
|
||||
@@ -110,10 +116,11 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
localCfg = &localconfig.LocalConfig{}
|
||||
}
|
||||
localCfg.UpsertServer(localconfig.Server{
|
||||
Server: server,
|
||||
PlainText: globalClientOpts.PlainText,
|
||||
Insecure: globalClientOpts.Insecure,
|
||||
GRPCWeb: globalClientOpts.GRPCWeb,
|
||||
Server: server,
|
||||
PlainText: globalClientOpts.PlainText,
|
||||
Insecure: globalClientOpts.Insecure,
|
||||
GRPCWeb: globalClientOpts.GRPCWeb,
|
||||
GRPCWebRootPath: globalClientOpts.GRPCWebRootPath,
|
||||
})
|
||||
localCfg.UpsertUser(localconfig.User{
|
||||
Name: ctxName,
|
||||
|
||||
@@ -340,11 +340,12 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
return command
|
||||
}
|
||||
|
||||
func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, action func(proj *v1alpha1.AppProject, group string, kind string) bool) *cobra.Command {
|
||||
func modifyClusterResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, action func(proj *v1alpha1.AppProject, group string, kind string) bool) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: cmdUse,
|
||||
Short: cmdDesc,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
|
||||
if len(args) != 3 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -364,11 +365,55 @@ func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.C
|
||||
}
|
||||
}
|
||||
|
||||
func modifyNamespaceResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, action func(proj *v1alpha1.AppProject, group string, kind string, useWhitelist bool) bool) *cobra.Command {
|
||||
var (
|
||||
list string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: cmdUse,
|
||||
Short: cmdDesc,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) != 3 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
projName, group, kind := args[0], args[1], args[2]
|
||||
conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
|
||||
defer util.Close(conn)
|
||||
|
||||
proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
|
||||
errors.CheckError(err)
|
||||
var useWhitelist = false
|
||||
if list == "white" {
|
||||
useWhitelist = true
|
||||
}
|
||||
if action(proj, group, kind, useWhitelist) {
|
||||
_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
|
||||
errors.CheckError(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
command.Flags().StringVarP(&list, "list", "l", "black", "Use blacklist or whitelist. This can only be 'white' or 'black'")
|
||||
return command
|
||||
}
|
||||
|
||||
// NewProjectAllowNamespaceResourceCommand returns a new instance of an `deny-cluster-resources` command
|
||||
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "allow-namespace-resource PROJECT GROUP KIND"
|
||||
desc := "Removes a namespaced API resource from the blacklist"
|
||||
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
|
||||
desc := "Removes a namespaced API resource from the blacklist or add a namespaced API resource to the whitelist"
|
||||
|
||||
return modifyNamespaceResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string, useWhitelist bool) bool {
|
||||
if useWhitelist {
|
||||
for _, item := range proj.Spec.NamespaceResourceWhitelist {
|
||||
if item.Group == group && item.Kind == kind {
|
||||
fmt.Printf("Group '%s' and kind '%s' already present in whitelisted namespaced resources\n", group, kind)
|
||||
return false
|
||||
}
|
||||
}
|
||||
proj.Spec.NamespaceResourceWhitelist = append(proj.Spec.NamespaceResourceWhitelist, v1.GroupKind{Group: group, Kind: kind})
|
||||
fmt.Printf("Group '%s' and kind '%s' is added to whitelisted namespaced resources\n", group, kind)
|
||||
return true
|
||||
}
|
||||
index := -1
|
||||
for i, item := range proj.Spec.NamespaceResourceBlacklist {
|
||||
if item.Group == group && item.Kind == kind {
|
||||
@@ -381,6 +426,7 @@ func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOpti
|
||||
return false
|
||||
}
|
||||
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist[:index], proj.Spec.NamespaceResourceBlacklist[index+1:]...)
|
||||
fmt.Printf("Group '%s' and kind '%s' is removed from blacklisted namespaced resources\n", group, kind)
|
||||
return true
|
||||
})
|
||||
}
|
||||
@@ -388,8 +434,25 @@ func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOpti
|
||||
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
|
||||
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "deny-namespace-resource PROJECT GROUP KIND"
|
||||
desc := "Adds a namespaced API resource to the blacklist"
|
||||
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
|
||||
desc := "Adds a namespaced API resource to the blacklist or removes a namespaced API resource from the whitelist"
|
||||
return modifyNamespaceResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string, useWhitelist bool) bool {
|
||||
if useWhitelist {
|
||||
index := -1
|
||||
for i, item := range proj.Spec.NamespaceResourceWhitelist {
|
||||
if item.Group == group && item.Kind == kind {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if index == -1 {
|
||||
fmt.Printf("Group '%s' and kind '%s' not in whitelisted namespaced resources\n", group, kind)
|
||||
return false
|
||||
}
|
||||
proj.Spec.NamespaceResourceWhitelist = append(proj.Spec.NamespaceResourceWhitelist[:index], proj.Spec.NamespaceResourceWhitelist[index+1:]...)
|
||||
fmt.Printf("Group '%s' and kind '%s' is removed from whitelisted namespaced resources\n", group, kind)
|
||||
return true
|
||||
}
|
||||
|
||||
for _, item := range proj.Spec.NamespaceResourceBlacklist {
|
||||
if item.Group == group && item.Kind == kind {
|
||||
fmt.Printf("Group '%s' and kind '%s' already present in blacklisted namespaced resources\n", group, kind)
|
||||
@@ -397,6 +460,7 @@ func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptio
|
||||
}
|
||||
}
|
||||
proj.Spec.NamespaceResourceBlacklist = append(proj.Spec.NamespaceResourceBlacklist, v1.GroupKind{Group: group, Kind: kind})
|
||||
fmt.Printf("Group '%s' and kind '%s' is added to blacklisted namespaced resources\n", group, kind)
|
||||
return true
|
||||
})
|
||||
}
|
||||
@@ -405,7 +469,7 @@ func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptio
|
||||
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "deny-cluster-resource PROJECT GROUP KIND"
|
||||
desc := "Removes a cluster-scoped API resource from the whitelist"
|
||||
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
|
||||
return modifyClusterResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
|
||||
index := -1
|
||||
for i, item := range proj.Spec.ClusterResourceWhitelist {
|
||||
if item.Group == group && item.Kind == kind {
|
||||
@@ -426,7 +490,7 @@ func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions
|
||||
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "allow-cluster-resource PROJECT GROUP KIND"
|
||||
desc := "Adds a cluster-scoped API resource to the whitelist"
|
||||
return modifyProjectResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
|
||||
return modifyClusterResourceCmd(use, desc, clientOpts, func(proj *v1alpha1.AppProject, group string, kind string) bool {
|
||||
for _, item := range proj.Spec.ClusterResourceWhitelist {
|
||||
if item.Group == group && item.Kind == kind {
|
||||
fmt.Printf("Group '%s' and kind '%s' already present in whitelisted cluster resources\n", group, kind)
|
||||
|
||||
@@ -43,11 +43,12 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
var tokenString string
|
||||
var refreshToken string
|
||||
clientOpts := argocdclient.ClientOptions{
|
||||
ConfigPath: "",
|
||||
ServerAddr: configCtx.Server.Server,
|
||||
Insecure: configCtx.Server.Insecure,
|
||||
GRPCWeb: globalClientOpts.GRPCWeb,
|
||||
PlainText: configCtx.Server.PlainText,
|
||||
ConfigPath: "",
|
||||
ServerAddr: configCtx.Server.Server,
|
||||
Insecure: configCtx.Server.Insecure,
|
||||
GRPCWeb: globalClientOpts.GRPCWeb,
|
||||
GRPCWebRootPath: globalClientOpts.GRPCWebRootPath,
|
||||
PlainText: configCtx.Server.PlainText,
|
||||
}
|
||||
acdClient := argocdclient.NewClientOrDie(&clientOpts)
|
||||
claims, err := configCtx.User.Claims()
|
||||
|
||||
@@ -52,7 +52,10 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
|
||||
// For better readability and easier formatting
|
||||
var repoAddExamples = ` # Add a Git repository via SSH using a private key for authentication, ignoring the server's host key:
|
||||
argocd repo add git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa
|
||||
argocd repo add git@git.example.com:repos/repo --insecure-ignore-host-key --ssh-private-key-path ~/id_rsa
|
||||
|
||||
# Add a Git repository via SSH on a non-default port - need to use ssh:// style URLs here
|
||||
argocd repo add ssh://git@git.example.com:2222/repos/repo --ssh-private-key-path ~/id_rsa
|
||||
|
||||
# Add a private Git repository via HTTPS using username/password and TLS client certificates:
|
||||
argocd repo add https://git.example.com/repos/repo --username git --password secret --tls-client-cert-path ~/mycert.crt --tls-client-cert-key-path ~/mycert.key
|
||||
@@ -162,7 +165,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
Upsert: upsert,
|
||||
}
|
||||
|
||||
createdRepo, err := repoIf.CreateRepository(context.Background(), &repoCreateReq)
|
||||
createdRepo, err := repoIf.Create(context.Background(), &repoCreateReq)
|
||||
errors.CheckError(err)
|
||||
fmt.Printf("repository '%s' added\n", createdRepo.Repo)
|
||||
},
|
||||
@@ -174,7 +177,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command.Flags().StringVar(&sshPrivateKeyPath, "ssh-private-key-path", "", "path to the private ssh key (e.g. ~/.ssh/id_rsa)")
|
||||
command.Flags().StringVar(&tlsClientCertPath, "tls-client-cert-path", "", "path to the TLS client cert (must be PEM format)")
|
||||
command.Flags().StringVar(&tlsClientCertKeyPath, "tls-client-cert-key-path", "", "path to the TLS client cert's key path (must be PEM format)")
|
||||
command.Flags().BoolVar(&insecureIgnoreHostKey, "insecure-ignore-host-key", false, "disables SSH strict host key checking (deprecated, use --insecure-skip-server-validation instead)")
|
||||
command.Flags().BoolVar(&insecureIgnoreHostKey, "insecure-ignore-host-key", false, "disables SSH strict host key checking (deprecated, use --insecure-skip-server-verification instead)")
|
||||
command.Flags().BoolVar(&insecureSkipServerVerification, "insecure-skip-server-verification", false, "disables server certificate and host key checks")
|
||||
command.Flags().BoolVar(&enableLfs, "enable-lfs", false, "enable git-lfs (Large File Support) on this repository")
|
||||
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
|
||||
@@ -194,7 +197,7 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
|
||||
defer util.Close(conn)
|
||||
for _, repoURL := range args {
|
||||
_, err := repoIf.DeleteRepository(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL})
|
||||
_, err := repoIf.Delete(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL})
|
||||
errors.CheckError(err)
|
||||
}
|
||||
},
|
||||
@@ -250,7 +253,7 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
err := fmt.Errorf("--refresh must be one of: 'hard'")
|
||||
errors.CheckError(err)
|
||||
}
|
||||
repos, err := repoIf.ListRepositories(context.Background(), &repositorypkg.RepoQuery{ForceRefresh: forceRefresh})
|
||||
repos, err := repoIf.List(context.Background(), &repositorypkg.RepoQuery{ForceRefresh: forceRefresh})
|
||||
errors.CheckError(err)
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/util/git"
|
||||
)
|
||||
|
||||
// NewRepoCredsCommand returns a new instance of an `argocd repo` command
|
||||
// NewRepoCredsCommand returns a new instance of an `argocd repocreds` command
|
||||
func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "repocreds",
|
||||
@@ -36,7 +36,7 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
return command
|
||||
}
|
||||
|
||||
// NewRepoCredsAddCommand returns a new instance of an `argocd repo add` command
|
||||
// NewRepoCredsAddCommand returns a new instance of an `argocd repocreds add` command
|
||||
func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
repo appsv1.RepoCreds
|
||||
@@ -50,8 +50,8 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
var repocredsAddExamples = ` # Add credentials with user/pass authentication to use for all repositories under https://git.example.com/repos
|
||||
argocd repocreds add https://git.example.com/repos/ --username git --password secret
|
||||
|
||||
# Add credentials with SSH private key authentication to use for all repositories under https://git.example.com/repos
|
||||
argocd repocreds add https://git.example.com/repos/ --ssh-private-key-path ~/.ssh/id_rsa
|
||||
# Add credentials with SSH private key authentication to use for all repositories under ssh://git@git.example.com/repos
|
||||
argocd repocreds add ssh://git@git.example.com/repos/ --ssh-private-key-path ~/.ssh/id_rsa
|
||||
`
|
||||
|
||||
var command = &cobra.Command{
|
||||
@@ -131,7 +131,7 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
return command
|
||||
}
|
||||
|
||||
// NewRepoCredsRemoveCommand returns a new instance of an `argocd repo list` command
|
||||
// NewRepoCredsRemoveCommand returns a new instance of an `argocd repocreds rm` command
|
||||
func NewRepoCredsRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "rm CREDSURL",
|
||||
@@ -172,7 +172,7 @@ func printRepoCredsUrls(repos []appsv1.RepoCreds) {
|
||||
}
|
||||
}
|
||||
|
||||
// NewRepoCredsListCommand returns a new instance of an `argocd repo rm` command
|
||||
// NewRepoCredsListCommand returns a new instance of an `argocd repo list` command
|
||||
func NewRepoCredsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
output string
|
||||
|
||||
@@ -59,6 +59,7 @@ func NewCommand() *cobra.Command {
|
||||
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
|
||||
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.")
|
||||
command.PersistentFlags().StringVar(&clientOpts.GRPCWebRootPath, "grpc-web-root-path", config.GetFlag("grpc-web-root-path", ""), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.")
|
||||
command.PersistentFlags().StringVar(&logLevel, "loglevel", config.GetFlag("loglevel", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.PersistentFlags().StringSliceVarP(&clientOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CD CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers)")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.PortForward, "port-forward", config.GetBoolFlag("port-forward"), "Connect to a random argocd-server port using port forwarding")
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Default service addresses and URLS of Argo CD internal services
|
||||
const (
|
||||
// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
|
||||
@@ -38,8 +44,6 @@ const (
|
||||
|
||||
// Default paths on the pod's file system
|
||||
const (
|
||||
// The default base path where application config is located
|
||||
DefaultPathAppConfig = "/app/config"
|
||||
// The default path where TLS certificates for repositories are located
|
||||
DefaultPathTLSConfig = "/app/config/tls"
|
||||
// The default path where SSH known hosts are stored
|
||||
@@ -62,10 +66,8 @@ const (
|
||||
AuthCookieName = "argocd.token"
|
||||
// RevisionHistoryLimit is the max number of successful sync to keep in history
|
||||
RevisionHistoryLimit = 10
|
||||
// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
|
||||
K8sClientConfigQPS = 25
|
||||
// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
|
||||
K8sClientConfigBurst = 50
|
||||
// ChangePasswordSSOTokenMaxAge is the max token age for password change operation
|
||||
ChangePasswordSSOTokenMaxAge = time.Minute * 5
|
||||
)
|
||||
|
||||
// Dex related constants
|
||||
@@ -138,14 +140,50 @@ const (
|
||||
EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT"
|
||||
// Overrides git submodule support, true by default
|
||||
EnvGitSubmoduleEnabled = "ARGOCD_GIT_MODULES_ENABLED"
|
||||
// EnvK8sClientQPS is the QPS value used for the kubernetes client (default: 50)
|
||||
EnvK8sClientQPS = "ARGOCD_K8S_CLIENT_QPS"
|
||||
// EnvK8sClientBurst is the burst value used for the kubernetes client (default: twice the client QPS)
|
||||
EnvK8sClientBurst = "ARGOCD_K8S_CLIENT_BURST"
|
||||
// EnvK8sClientMaxIdleConnections is the number of max idle connections in K8s REST client HTTP transport (default: 500)
|
||||
EnvK8sClientMaxIdleConnections = "ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS"
|
||||
)
|
||||
|
||||
const (
|
||||
// MinClientVersion is the minimum client version that can interface with this API server.
|
||||
// When introducing breaking changes to the API or datastructures, this number should be bumped.
|
||||
// The value here may be lower than the current value in VERSION
|
||||
MinClientVersion = "1.3.0"
|
||||
MinClientVersion = "1.4.0"
|
||||
// CacheVersion is a objects version cached using util/cache/cache.go.
|
||||
// Number should be bumped in case of backward incompatible change to make sure cache is invalidated after upgrade.
|
||||
CacheVersion = "1.0.0"
|
||||
)
|
||||
|
||||
var (
|
||||
// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
|
||||
K8sClientConfigQPS float32 = 50
|
||||
// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
|
||||
K8sClientConfigBurst int = 100
|
||||
// K8sMaxIdleConnections controls the number of max idle connections in K8s REST client HTTP transport
|
||||
K8sMaxIdleConnections = 500
|
||||
)
|
||||
|
||||
func init() {
|
||||
if envQPS := os.Getenv(EnvK8sClientQPS); envQPS != "" {
|
||||
if qps, err := strconv.ParseFloat(envQPS, 32); err != nil {
|
||||
K8sClientConfigQPS = float32(qps)
|
||||
}
|
||||
}
|
||||
if envBurst := os.Getenv(EnvK8sClientBurst); envBurst != "" {
|
||||
if burst, err := strconv.Atoi(envBurst); err != nil {
|
||||
K8sClientConfigBurst = burst
|
||||
}
|
||||
} else {
|
||||
K8sClientConfigBurst = 2 * int(K8sClientConfigQPS)
|
||||
}
|
||||
|
||||
if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" {
|
||||
if maxConn, err := strconv.Atoi(envMaxConn); err != nil {
|
||||
K8sMaxIdleConnections = maxConn
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -167,6 +167,10 @@ func NewApplicationController(
|
||||
return &ctrl, nil
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) GetMetricsServer() *metrics.MetricsServer {
|
||||
return ctrl.metricsServer
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) onKubectlRun(command string) (util.Closer, error) {
|
||||
ctrl.metricsServer.IncKubectlExec(command)
|
||||
if ctrl.kubectlSemaphore != nil {
|
||||
@@ -821,22 +825,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
return
|
||||
}
|
||||
|
||||
app := origApp.DeepCopy()
|
||||
logCtx := log.WithFields(log.Fields{"application": app.Name})
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
reconcileDuration := time.Since(startTime)
|
||||
ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
|
||||
logCtx := log.WithFields(log.Fields{
|
||||
"application": origApp.Name,
|
||||
"time_ms": reconcileDuration.Seconds() * 1e3,
|
||||
logCtx.WithFields(log.Fields{
|
||||
"time_ms": reconcileDuration.Milliseconds(),
|
||||
"level": comparisonLevel,
|
||||
"dest-server": origApp.Spec.Destination.Server,
|
||||
"dest-namespace": origApp.Spec.Destination.Namespace,
|
||||
})
|
||||
logCtx.Info("Reconciliation completed")
|
||||
}).Info("Reconciliation completed")
|
||||
}()
|
||||
|
||||
app := origApp.DeepCopy()
|
||||
logCtx := log.WithFields(log.Fields{"application": app.Name})
|
||||
if comparisonLevel == ComparisonWithNothing {
|
||||
managedResources := make([]*appv1.ResourceDiff, 0)
|
||||
if err := ctrl.cache.GetAppManagedResources(app.Name, &managedResources); err != nil {
|
||||
@@ -888,6 +890,9 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
|
||||
observedAt := metav1.Now()
|
||||
compareResult := ctrl.appStateManager.CompareAppState(app, project, revision, app.Spec.Source, refreshType == appv1.RefreshTypeHard, localManifests)
|
||||
for k, v := range compareResult.timings {
|
||||
logCtx = logCtx.WithField(k, v.Milliseconds())
|
||||
}
|
||||
|
||||
ctrl.normalizeApplication(origApp, app)
|
||||
|
||||
@@ -912,7 +917,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
)
|
||||
}
|
||||
} else {
|
||||
logCtx.Infof("Sync prevented by sync window")
|
||||
logCtx.Info("Sync prevented by sync window")
|
||||
}
|
||||
|
||||
if app.Status.ReconciledAt == nil || comparisonLevel == CompareWithLatest {
|
||||
@@ -1080,14 +1085,30 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
return nil
|
||||
}
|
||||
|
||||
if !app.Spec.SyncPolicy.Automated.Prune {
|
||||
requirePruneOnly := true
|
||||
for _, r := range resources {
|
||||
if r.Status != appv1.SyncStatusCodeSynced && !r.RequiresPruning {
|
||||
requirePruneOnly = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if requirePruneOnly {
|
||||
logCtx.Infof("Skipping auto-sync: need to prune extra resources only but automated prune is disabled")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
desiredCommitSHA := syncStatus.Revision
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA)
|
||||
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
|
||||
op := appv1.Operation{
|
||||
Sync: &appv1.SyncOperation{
|
||||
Revision: desiredCommitSHA,
|
||||
Prune: app.Spec.SyncPolicy.Automated.Prune,
|
||||
Revision: desiredCommitSHA,
|
||||
Prune: app.Spec.SyncPolicy.Automated.Prune,
|
||||
SyncOptions: app.Spec.SyncPolicy.SyncOptions,
|
||||
},
|
||||
InitiatedBy: appv1.OperationInitiator{Automated: true},
|
||||
}
|
||||
// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.
|
||||
// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an
|
||||
|
||||
@@ -111,7 +111,7 @@ func newFakeController(data *fakeData) *ApplicationController {
|
||||
ctrl.stateCache = &mockStateCache
|
||||
mockStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil)
|
||||
mockStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(data.managedLiveObjs, nil)
|
||||
mockStateCache.On("GetServerVersion", mock.Anything).Return("v1.2.3", nil)
|
||||
mockStateCache.On("GetVersionsInfo", mock.Anything).Return("v1.2.3", nil, nil)
|
||||
response := make(map[kube.ResourceKey]argoappv1.ResourceNode)
|
||||
for k, v := range data.namespacedResources {
|
||||
response[k] = v.ResourceNode
|
||||
@@ -228,7 +228,7 @@ func TestAutoSync(t *testing.T) {
|
||||
Status: argoappv1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -240,7 +240,7 @@ func TestAutoSync(t *testing.T) {
|
||||
func TestSkipAutoSync(t *testing.T) {
|
||||
// Verify we skip when we previously synced to it in our most recent history
|
||||
// Set current to 'aaaaa', desired to 'aaaa' and mark system OutOfSync
|
||||
{
|
||||
t.Run("PreviouslySyncedToRevision", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
|
||||
syncStatus := argoappv1.SyncStatus{
|
||||
@@ -252,10 +252,10 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
}
|
||||
})
|
||||
|
||||
// Verify we skip when we are already Synced (even if revision is different)
|
||||
{
|
||||
t.Run("AlreadyInSyncedState", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
|
||||
syncStatus := argoappv1.SyncStatus{
|
||||
@@ -267,10 +267,10 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
}
|
||||
})
|
||||
|
||||
// Verify we skip when auto-sync is disabled
|
||||
{
|
||||
t.Run("AutoSyncIsDisabled", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.SyncPolicy = nil
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
|
||||
@@ -283,10 +283,10 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
}
|
||||
})
|
||||
|
||||
// Verify we skip when application is marked for deletion
|
||||
{
|
||||
t.Run("ApplicationIsMarkedForDeletion", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
now := metav1.Now()
|
||||
app.DeletionTimestamp = &now
|
||||
@@ -300,11 +300,11 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
}
|
||||
})
|
||||
|
||||
// Verify we skip when previous sync attempt failed and return error condition
|
||||
// Set current to 'aaaaa', desired to 'bbbbb' and add 'bbbbb' to failure history
|
||||
{
|
||||
t.Run("PreviousSyncAttemptFailed", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = &argoappv1.OperationState{
|
||||
Operation: argoappv1.Operation{
|
||||
@@ -321,12 +321,28 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: argoappv1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}})
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
|
||||
syncStatus := argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{
|
||||
{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync, RequiresPruning: true},
|
||||
})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
})
|
||||
}
|
||||
|
||||
// TestAutoSyncIndicateError verifies we skip auto-sync and return error condition if previous sync failed
|
||||
@@ -357,7 +373,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
|
||||
Source: *app.Spec.Source.DeepCopy(),
|
||||
},
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}})
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -400,7 +416,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{})
|
||||
cond := ctrl.autoSync(app, &syncStatus, []argoappv1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: argoappv1.SyncStatusCodeOutOfSync}})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get("my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
103
controller/cache/cache.go
vendored
103
controller/cache/cache.go
vendored
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
@@ -28,7 +29,7 @@ type cacheSettings struct {
|
||||
|
||||
type LiveStateCache interface {
|
||||
// Returns k8s server version
|
||||
GetServerVersion(serverURL string) (string, error)
|
||||
GetVersionsInfo(serverURL string) (string, []metav1.APIGroup, error)
|
||||
// Returns true of given group kind is a namespaced resource
|
||||
IsNamespaced(server string, gk schema.GroupKind) (bool, error)
|
||||
// Executes give callback against resource specified by the key and all its children
|
||||
@@ -70,7 +71,7 @@ func NewLiveStateCache(
|
||||
appInformer: appInformer,
|
||||
db: db,
|
||||
clusters: make(map[string]*clusterInfo),
|
||||
lock: &sync.Mutex{},
|
||||
lock: &sync.RWMutex{},
|
||||
onObjectUpdated: onObjectUpdated,
|
||||
kubectl: kubectl,
|
||||
settingsMgr: settingsMgr,
|
||||
@@ -82,7 +83,7 @@ func NewLiveStateCache(
|
||||
type liveStateCache struct {
|
||||
db db.ArgoDB
|
||||
clusters map[string]*clusterInfo
|
||||
lock *sync.Mutex
|
||||
lock *sync.RWMutex
|
||||
appInformer cache.SharedIndexInformer
|
||||
onObjectUpdated ObjectUpdatedHandler
|
||||
kubectl kube.Kubectl
|
||||
@@ -109,32 +110,47 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
|
||||
}
|
||||
|
||||
func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
|
||||
c.lock.RLock()
|
||||
info, ok := c.clusters[server]
|
||||
c.lock.RUnlock()
|
||||
|
||||
if ok {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
info, ok := c.clusters[server]
|
||||
if !ok {
|
||||
cluster, err := c.db.GetCluster(context.Background(), server)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = &clusterInfo{
|
||||
apisMeta: make(map[schema.GroupKind]*apiMeta),
|
||||
lock: &sync.Mutex{},
|
||||
nodes: make(map[kube.ResourceKey]*node),
|
||||
nsIndex: make(map[string]map[kube.ResourceKey]*node),
|
||||
onObjectUpdated: c.onObjectUpdated,
|
||||
kubectl: c.kubectl,
|
||||
cluster: cluster,
|
||||
syncTime: nil,
|
||||
log: log.WithField("server", cluster.Server),
|
||||
cacheSettingsSrc: c.getCacheSettings,
|
||||
onEventReceived: func(event watch.EventType, un *unstructured.Unstructured) {
|
||||
c.metricsServer.IncClusterEventsCount(cluster.Server)
|
||||
},
|
||||
}
|
||||
|
||||
c.clusters[cluster.Server] = info
|
||||
info, ok = c.clusters[server]
|
||||
if ok {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
logCtx := log.WithField("server", server)
|
||||
logCtx.Info("initializing cluster")
|
||||
cluster, err := c.db.GetCluster(context.Background(), server)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = &clusterInfo{
|
||||
apisMeta: make(map[schema.GroupKind]*apiMeta),
|
||||
lock: &sync.RWMutex{},
|
||||
nodes: make(map[kube.ResourceKey]*node),
|
||||
nsIndex: make(map[string]map[kube.ResourceKey]*node),
|
||||
onObjectUpdated: c.onObjectUpdated,
|
||||
kubectl: c.kubectl,
|
||||
cluster: cluster,
|
||||
syncTime: nil,
|
||||
log: logCtx,
|
||||
cacheSettingsSrc: c.getCacheSettings,
|
||||
onEventReceived: func(event watch.EventType, un *unstructured.Unstructured) {
|
||||
gvk := un.GroupVersionKind()
|
||||
c.metricsServer.IncClusterEventsCount(cluster.Server, gvk.Group, gvk.Kind)
|
||||
},
|
||||
metricsServer: c.metricsServer,
|
||||
}
|
||||
c.clusters[cluster.Server] = info
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -152,8 +168,8 @@ func (c *liveStateCache) getSyncedCluster(server string) (*clusterInfo, error) {
|
||||
|
||||
func (c *liveStateCache) Invalidate() {
|
||||
log.Info("invalidating live state cache")
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RLock()
|
||||
for _, clust := range c.clusters {
|
||||
clust.invalidate()
|
||||
}
|
||||
@@ -190,14 +206,15 @@ func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return clusterInfo.getManagedLiveObjs(a, targetObjs, c.metricsServer)
|
||||
return clusterInfo.getManagedLiveObjs(a, targetObjs)
|
||||
}
|
||||
func (c *liveStateCache) GetServerVersion(serverURL string) (string, error) {
|
||||
|
||||
func (c *liveStateCache) GetVersionsInfo(serverURL string) (string, []metav1.APIGroup, error) {
|
||||
clusterInfo, err := c.getSyncedCluster(serverURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", nil, err
|
||||
}
|
||||
return clusterInfo.serverVersion, nil
|
||||
return clusterInfo.serverVersion, clusterInfo.apiGroups, nil
|
||||
}
|
||||
|
||||
func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {
|
||||
@@ -210,8 +227,6 @@ func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {
|
||||
}
|
||||
|
||||
func (c *liveStateCache) getCacheSettings() *cacheSettings {
|
||||
c.cacheSettingsLock.Lock()
|
||||
defer c.cacheSettingsLock.Unlock()
|
||||
return c.cacheSettings
|
||||
}
|
||||
|
||||
@@ -261,8 +276,9 @@ func (c *liveStateCache) Run(ctx context.Context) error {
|
||||
util.RetryUntilSucceed(func() error {
|
||||
clusterEventCallback := func(event *db.ClusterEvent) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if cluster, ok := c.clusters[event.Cluster.Server]; ok {
|
||||
cluster, ok := c.clusters[event.Cluster.Server]
|
||||
if ok {
|
||||
defer c.lock.Unlock()
|
||||
if event.Type == watch.Deleted {
|
||||
cluster.invalidate()
|
||||
delete(c.clusters, event.Cluster.Server)
|
||||
@@ -270,11 +286,14 @@ func (c *liveStateCache) Run(ctx context.Context) error {
|
||||
cluster.cluster = event.Cluster
|
||||
cluster.invalidate()
|
||||
}
|
||||
} else if event.Type == watch.Added && isClusterHasApps(c.appInformer.GetStore().List(), event.Cluster) {
|
||||
go func() {
|
||||
// warm up cache for cluster with apps
|
||||
_, _ = c.getSyncedCluster(event.Cluster.Server)
|
||||
}()
|
||||
} else {
|
||||
c.lock.Unlock()
|
||||
if event.Type == watch.Added && isClusterHasApps(c.appInformer.GetStore().List(), event.Cluster) {
|
||||
go func() {
|
||||
// warm up cache for cluster with apps
|
||||
_, _ = c.getSyncedCluster(event.Cluster.Server)
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -287,8 +306,8 @@ func (c *liveStateCache) Run(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (c *liveStateCache) GetClustersInfo() []metrics.ClusterInfo {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
res := make([]metrics.ClusterInfo, 0)
|
||||
for _, info := range c.clusters {
|
||||
res = append(res, info.getClusterInfo())
|
||||
|
||||
6
controller/cache/cache_test.go
vendored
6
controller/cache/cache_test.go
vendored
@@ -11,16 +11,16 @@ import (
|
||||
func TestGetServerVersion(t *testing.T) {
|
||||
now := time.Now()
|
||||
cache := &liveStateCache{
|
||||
lock: &sync.Mutex{},
|
||||
lock: &sync.RWMutex{},
|
||||
clusters: map[string]*clusterInfo{
|
||||
"http://localhost": {
|
||||
syncTime: &now,
|
||||
lock: &sync.Mutex{},
|
||||
lock: &sync.RWMutex{},
|
||||
serverVersion: "123",
|
||||
},
|
||||
}}
|
||||
|
||||
version, err := cache.GetServerVersion("http://localhost")
|
||||
version, _, err := cache.GetVersionsInfo("http://localhost")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "123", version)
|
||||
}
|
||||
|
||||
227
controller/cache/cluster.go
vendored
227
controller/cache/cluster.go
vendored
@@ -9,19 +9,19 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/dynamic"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/argoproj/argo-cd/controller/metrics"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/dynamic"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/pager"
|
||||
|
||||
"github.com/argoproj/argo-cd/controller/metrics"
|
||||
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/util"
|
||||
"github.com/argoproj/argo-cd/util/health"
|
||||
@@ -45,8 +45,12 @@ type clusterInfo struct {
|
||||
syncError error
|
||||
apisMeta map[schema.GroupKind]*apiMeta
|
||||
serverVersion string
|
||||
apiGroups []metav1.APIGroup
|
||||
// namespacedResources is a simple map which indicates a groupKind is namespaced
|
||||
namespacedResources map[schema.GroupKind]bool
|
||||
|
||||
lock *sync.Mutex
|
||||
// lock is a rw lock which protects the fields of clusterInfo
|
||||
lock *sync.RWMutex
|
||||
nodes map[kube.ResourceKey]*node
|
||||
nsIndex map[string]map[kube.ResourceKey]*node
|
||||
|
||||
@@ -56,29 +60,32 @@ type clusterInfo struct {
|
||||
cluster *appv1.Cluster
|
||||
log *log.Entry
|
||||
cacheSettingsSrc func() *cacheSettings
|
||||
metricsServer *metrics.MetricsServer
|
||||
}
|
||||
|
||||
func (c *clusterInfo) replaceResourceCache(gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured) {
|
||||
func (c *clusterInfo) replaceResourceCache(gk schema.GroupKind, resourceVersion string, objs []unstructured.Unstructured, ns string) {
|
||||
info, ok := c.apisMeta[gk]
|
||||
if ok {
|
||||
objByKind := make(map[kube.ResourceKey]*unstructured.Unstructured)
|
||||
objByKey := make(map[kube.ResourceKey]*unstructured.Unstructured)
|
||||
for i := range objs {
|
||||
objByKind[kube.GetResourceKey(&objs[i])] = &objs[i]
|
||||
objByKey[kube.GetResourceKey(&objs[i])] = &objs[i]
|
||||
}
|
||||
|
||||
// update existing nodes
|
||||
for i := range objs {
|
||||
obj := &objs[i]
|
||||
key := kube.GetResourceKey(&objs[i])
|
||||
existingNode, exists := c.nodes[key]
|
||||
c.onNodeUpdated(exists, existingNode, obj, key)
|
||||
c.onNodeUpdated(exists, existingNode, obj)
|
||||
}
|
||||
|
||||
// remove existing nodes that a no longer exist
|
||||
for key, existingNode := range c.nodes {
|
||||
if key.Kind != gk.Kind || key.Group != gk.Group {
|
||||
if key.Kind != gk.Kind || key.Group != gk.Group || ns != "" && key.Namespace != ns {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := objByKind[key]; !ok {
|
||||
if _, ok := objByKey[key]; !ok {
|
||||
c.onNodeRemoved(key, existingNode)
|
||||
}
|
||||
}
|
||||
@@ -115,8 +122,9 @@ func isServiceAccountTokenSecret(un *unstructured.Unstructured) (bool, metav1.Ow
|
||||
|
||||
func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLabel string) *node {
|
||||
ownerRefs := un.GetOwnerReferences()
|
||||
gvk := un.GroupVersionKind()
|
||||
// Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
|
||||
if un.GroupVersionKind().Group == "" && un.GetKind() == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0 {
|
||||
if gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0 {
|
||||
ownerRefs = append(ownerRefs, metav1.OwnerReference{
|
||||
Name: un.GetName(),
|
||||
Kind: kube.ServiceKind,
|
||||
@@ -124,6 +132,17 @@ func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLa
|
||||
})
|
||||
}
|
||||
|
||||
// Special case for Operator Lifecycle Manager ClusterServiceVersion:
|
||||
if un.GroupVersionKind().Group == "operators.coreos.com" && un.GetKind() == "ClusterServiceVersion" {
|
||||
if un.GetAnnotations()["olm.operatorGroup"] != "" {
|
||||
ownerRefs = append(ownerRefs, metav1.OwnerReference{
|
||||
Name: un.GetAnnotations()["olm.operatorGroup"],
|
||||
Kind: "OperatorGroup",
|
||||
APIVersion: "operators.coreos.com/v1",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// edge case. Consider auto-created service account tokens as a child of service account objects
|
||||
if yes, ref := isServiceAccountTokenSecret(un); yes {
|
||||
ownerRefs = append(ownerRefs, ref)
|
||||
@@ -140,7 +159,15 @@ func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLa
|
||||
if len(ownerRefs) == 0 && appName != "" {
|
||||
nodeInfo.appName = appName
|
||||
nodeInfo.resource = un
|
||||
} else {
|
||||
// edge case. we do not label CRDs, so they miss the tracking label we inject. But we still
|
||||
// want the full resource to be available in our cache (to diff), so we store all CRDs
|
||||
switch gvk.Kind {
|
||||
case kube.CustomResourceDefinitionKind:
|
||||
nodeInfo.resource = un
|
||||
}
|
||||
}
|
||||
|
||||
nodeInfo.health, _ = health.GetResourceHealth(un, c.cacheSettingsSrc().ResourceOverrides)
|
||||
return nodeInfo
|
||||
}
|
||||
@@ -174,26 +201,29 @@ func (c *clusterInfo) invalidate() {
|
||||
c.apisMeta[i].watchCancel()
|
||||
}
|
||||
c.apisMeta = nil
|
||||
c.namespacedResources = nil
|
||||
c.log.Warnf("invalidated cluster")
|
||||
}
|
||||
|
||||
func (c *clusterInfo) synced() bool {
|
||||
if c.syncTime == nil {
|
||||
syncTime := c.syncTime
|
||||
if syncTime == nil {
|
||||
return false
|
||||
}
|
||||
if c.syncError != nil {
|
||||
return time.Now().Before(c.syncTime.Add(clusterRetryTimeout))
|
||||
return time.Now().Before(syncTime.Add(clusterRetryTimeout))
|
||||
}
|
||||
return time.Now().Before(c.syncTime.Add(clusterSyncTimeout))
|
||||
return time.Now().Before(syncTime.Add(clusterSyncTimeout))
|
||||
}
|
||||
|
||||
func (c *clusterInfo) stopWatching(gk schema.GroupKind) {
|
||||
func (c *clusterInfo) stopWatching(gk schema.GroupKind, ns string) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if info, ok := c.apisMeta[gk]; ok {
|
||||
info.watchCancel()
|
||||
delete(c.apisMeta, gk)
|
||||
c.replaceResourceCache(gk, "", []unstructured.Unstructured{})
|
||||
log.Warnf("Stop watching %s not found on %s.", gk, c.cluster.Server)
|
||||
c.replaceResourceCache(gk, "", []unstructured.Unstructured{}, ns)
|
||||
c.log.Warnf("Stop watching: %s not found", gk)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,16 +239,17 @@ func (c *clusterInfo) startMissingWatches() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
namespacedResources := make(map[schema.GroupKind]bool)
|
||||
for i := range apis {
|
||||
api := apis[i]
|
||||
namespacedResources[api.GroupKind] = api.Meta.Namespaced
|
||||
if _, ok := c.apisMeta[api.GroupKind]; !ok {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}
|
||||
c.apisMeta[api.GroupKind] = info
|
||||
|
||||
err = c.processApi(client, api, func(resClient dynamic.ResourceInterface) error {
|
||||
go c.watchEvents(ctx, api, info, resClient)
|
||||
err = c.processApi(client, api, func(resClient dynamic.ResourceInterface, ns string) error {
|
||||
go c.watchEvents(ctx, api, info, resClient, ns)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -226,16 +257,17 @@ func (c *clusterInfo) startMissingWatches() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
c.namespacedResources = namespacedResources
|
||||
return nil
|
||||
}
|
||||
|
||||
func runSynced(lock *sync.Mutex, action func() error) error {
|
||||
func runSynced(lock sync.Locker, action func() error) error {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
return action()
|
||||
}
|
||||
|
||||
func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo, info *apiMeta, resClient dynamic.ResourceInterface) {
|
||||
func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo, info *apiMeta, resClient dynamic.ResourceInterface, ns string) {
|
||||
util.RetryUntilSucceed(func() (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -245,11 +277,26 @@ func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo,
|
||||
|
||||
err = runSynced(c.lock, func() error {
|
||||
if info.resourceVersion == "" {
|
||||
list, err := resClient.List(metav1.ListOptions{})
|
||||
listPager := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
|
||||
res, err := resClient.List(opts)
|
||||
if err == nil {
|
||||
info.resourceVersion = res.GetResourceVersion()
|
||||
}
|
||||
return res, err
|
||||
})
|
||||
var items []unstructured.Unstructured
|
||||
err = listPager.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
|
||||
if un, ok := obj.(*unstructured.Unstructured); !ok {
|
||||
return fmt.Errorf("object %s/%s has an unexpected type", un.GroupVersionKind().String(), un.GetName())
|
||||
} else {
|
||||
items = append(items, *un)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load initial state of resource %s: %v", api.GroupKind.String(), err)
|
||||
}
|
||||
c.replaceResourceCache(api.GroupKind, list.GetResourceVersion(), list.Items)
|
||||
c.replaceResourceCache(api.GroupKind, info.resourceVersion, items, ns)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -260,18 +307,13 @@ func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo,
|
||||
|
||||
w, err := resClient.Watch(metav1.ListOptions{ResourceVersion: info.resourceVersion})
|
||||
if errors.IsNotFound(err) {
|
||||
c.stopWatching(api.GroupKind)
|
||||
c.stopWatching(api.GroupKind, ns)
|
||||
return nil
|
||||
}
|
||||
|
||||
err = runSynced(c.lock, func() error {
|
||||
if errors.IsGone(err) {
|
||||
info.resourceVersion = ""
|
||||
log.Warnf("Resource version of %s on %s is too old.", api.GroupKind, c.cluster.Server)
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
||||
if errors.IsGone(err) {
|
||||
info.resourceVersion = ""
|
||||
c.log.Warnf("Resource version of %s is too old", api.GroupKind)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -292,7 +334,7 @@ func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo,
|
||||
|
||||
if groupOk && groupErr == nil && kindOk && kindErr == nil {
|
||||
gk := schema.GroupKind{Group: group, Kind: kind}
|
||||
c.stopWatching(gk)
|
||||
c.stopWatching(gk, ns)
|
||||
}
|
||||
} else {
|
||||
err = runSynced(c.lock, func() error {
|
||||
@@ -302,7 +344,7 @@ func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo,
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Warnf("Failed to start missing watch: %v", err)
|
||||
c.log.Warnf("Failed to start missing watch: %v", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Watch %s on %s has closed", api.GroupKind, c.cluster.Server)
|
||||
@@ -313,10 +355,10 @@ func (c *clusterInfo) watchEvents(ctx context.Context, api kube.APIResourceInfo,
|
||||
}, fmt.Sprintf("watch %s on %s", api.GroupKind, c.cluster.Server), ctx, watchResourcesRetryTimeout)
|
||||
}
|
||||
|
||||
func (c *clusterInfo) processApi(client dynamic.Interface, api kube.APIResourceInfo, callback func(resClient dynamic.ResourceInterface) error) error {
|
||||
func (c *clusterInfo) processApi(client dynamic.Interface, api kube.APIResourceInfo, callback func(resClient dynamic.ResourceInterface, ns string) error) error {
|
||||
resClient := client.Resource(api.GroupVersionResource)
|
||||
if len(c.cluster.Namespaces) == 0 {
|
||||
return callback(resClient)
|
||||
return callback(resClient, "")
|
||||
}
|
||||
|
||||
if !api.Meta.Namespaced {
|
||||
@@ -324,7 +366,7 @@ func (c *clusterInfo) processApi(client dynamic.Interface, api kube.APIResourceI
|
||||
}
|
||||
|
||||
for _, ns := range c.cluster.Namespaces {
|
||||
err := callback(resClient.Namespace(ns))
|
||||
err := callback(resClient.Namespace(ns), ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -341,12 +383,19 @@ func (c *clusterInfo) sync() (err error) {
|
||||
}
|
||||
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
|
||||
c.nodes = make(map[kube.ResourceKey]*node)
|
||||
c.namespacedResources = make(map[schema.GroupKind]bool)
|
||||
config := c.cluster.RESTConfig()
|
||||
version, err := c.kubectl.GetServerVersion(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.serverVersion = version
|
||||
groups, err := c.kubectl.GetAPIGroups(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.apiGroups = groups
|
||||
|
||||
apis, err := c.kubectl.GetAPIResources(config, c.cacheSettingsSrc().ResourcesFilter)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -357,25 +406,47 @@ func (c *clusterInfo) sync() (err error) {
|
||||
}
|
||||
lock := sync.Mutex{}
|
||||
err = util.RunAllAsync(len(apis), func(i int) error {
|
||||
return c.processApi(client, apis[i], func(resClient dynamic.ResourceInterface) error {
|
||||
list, err := resClient.List(metav1.ListOptions{})
|
||||
api := apis[i]
|
||||
|
||||
lock.Lock()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}
|
||||
c.apisMeta[api.GroupKind] = info
|
||||
c.namespacedResources[api.GroupKind] = api.Meta.Namespaced
|
||||
lock.Unlock()
|
||||
|
||||
return c.processApi(client, api, func(resClient dynamic.ResourceInterface, ns string) error {
|
||||
|
||||
listPager := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
|
||||
res, err := resClient.List(opts)
|
||||
if err == nil {
|
||||
lock.Lock()
|
||||
info.resourceVersion = res.GetResourceVersion()
|
||||
lock.Unlock()
|
||||
}
|
||||
return res, err
|
||||
})
|
||||
|
||||
err = listPager.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
|
||||
if un, ok := obj.(*unstructured.Unstructured); !ok {
|
||||
return fmt.Errorf("object %s/%s has an unexpected type", un.GroupVersionKind().String(), un.GetName())
|
||||
} else {
|
||||
lock.Lock()
|
||||
c.setNode(c.createObjInfo(un, c.cacheSettingsSrc().AppInstanceLabelKey))
|
||||
lock.Unlock()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load initial state of resource %s: %v", api.GroupKind.String(), err)
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
for i := range list.Items {
|
||||
c.setNode(c.createObjInfo(&list.Items[i], c.cacheSettingsSrc().AppInstanceLabelKey))
|
||||
}
|
||||
lock.Unlock()
|
||||
go c.watchEvents(ctx, api, info, resClient, ns)
|
||||
return nil
|
||||
})
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
err = c.startMissingWatches()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Failed to sync cluster %s: %v", c.cluster.Server, err)
|
||||
return err
|
||||
@@ -386,12 +457,17 @@ func (c *clusterInfo) sync() (err error) {
|
||||
}
|
||||
|
||||
func (c *clusterInfo) ensureSynced() error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
// first check if cluster is synced *without lock*
|
||||
if c.synced() {
|
||||
return c.syncError
|
||||
}
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
// before doing any work, check once again now that we have the lock, to see if it got
|
||||
// synced between the first check and now
|
||||
if c.synced() {
|
||||
return c.syncError
|
||||
}
|
||||
|
||||
err := c.sync()
|
||||
syncTime := time.Now()
|
||||
c.syncTime = &syncTime
|
||||
@@ -400,8 +476,8 @@ func (c *clusterInfo) ensureSynced() error {
|
||||
}
|
||||
|
||||
func (c *clusterInfo) getNamespaceTopLevelResources(namespace string) map[kube.ResourceKey]appv1.ResourceNode {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
nodes := make(map[kube.ResourceKey]appv1.ResourceNode)
|
||||
for _, node := range c.nsIndex[namespace] {
|
||||
if len(node.ownerRefs) == 0 {
|
||||
@@ -442,15 +518,17 @@ func (c *clusterInfo) iterateHierarchy(key kube.ResourceKey, action func(child a
|
||||
}
|
||||
|
||||
func (c *clusterInfo) isNamespaced(gk schema.GroupKind) bool {
|
||||
if api, ok := c.apisMeta[gk]; ok && !api.namespaced {
|
||||
return false
|
||||
// this is safe to access without a lock since we always replace the entire map instead of mutating keys
|
||||
if isNamespaced, ok := c.namespacedResources[gk]; ok {
|
||||
return isNamespaced
|
||||
}
|
||||
log.Warnf("group/kind %s scope is unknown (known objects: %d). assuming namespaced object", gk, len(c.namespacedResources))
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured, metricsServer *metrics.MetricsServer) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
managedObjs := make(map[kube.ResourceKey]*unstructured.Unstructured)
|
||||
// iterate all objects in live state cache to find ones associated with app
|
||||
@@ -459,8 +537,8 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
|
||||
managedObjs[key] = o.resource
|
||||
}
|
||||
}
|
||||
config := metrics.AddMetricsTransportWrapper(metricsServer, a, c.cluster.RESTConfig())
|
||||
// iterate target objects and identify ones that already exist in the cluster,\
|
||||
config := metrics.AddMetricsTransportWrapper(c.metricsServer, a, c.cluster.RESTConfig())
|
||||
// iterate target objects and identify ones that already exist in the cluster,
|
||||
// but are simply missing our label
|
||||
lock := &sync.Mutex{}
|
||||
err := util.RunAllAsync(len(targetObjs), func(i int) error {
|
||||
@@ -528,20 +606,23 @@ func (c *clusterInfo) processEvent(event watch.EventType, un *unstructured.Unstr
|
||||
if c.onEventReceived != nil {
|
||||
c.onEventReceived(event, un)
|
||||
}
|
||||
key := kube.GetResourceKey(un)
|
||||
if event == watch.Modified && skipAppRequeing(key) {
|
||||
return
|
||||
}
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
key := kube.GetResourceKey(un)
|
||||
existingNode, exists := c.nodes[key]
|
||||
if event == watch.Deleted {
|
||||
if exists {
|
||||
c.onNodeRemoved(key, existingNode)
|
||||
}
|
||||
} else if event != watch.Deleted {
|
||||
c.onNodeUpdated(exists, existingNode, un, key)
|
||||
c.onNodeUpdated(exists, existingNode, un)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstructured.Unstructured, key kube.ResourceKey) {
|
||||
func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstructured.Unstructured) {
|
||||
nodes := make([]*node, 0)
|
||||
if exists {
|
||||
nodes = append(nodes, existingNode)
|
||||
@@ -554,7 +635,7 @@ func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstruc
|
||||
n := nodes[i]
|
||||
if ns, ok := c.nsIndex[n.ref.Namespace]; ok {
|
||||
app := n.getApp(ns)
|
||||
if app == "" || skipAppRequeing(key) {
|
||||
if app == "" {
|
||||
continue
|
||||
}
|
||||
toNotify[app] = n.isRootAppNode() || toNotify[app]
|
||||
@@ -584,8 +665,8 @@ var (
|
||||
)
|
||||
|
||||
func (c *clusterInfo) getClusterInfo() metrics.ClusterInfo {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return metrics.ClusterInfo{
|
||||
APIsCount: len(c.apisMeta),
|
||||
K8SVersion: c.serverVersion,
|
||||
|
||||
30
controller/cache/cluster_test.go
vendored
30
controller/cache/cluster_test.go
vendored
@@ -153,7 +153,7 @@ func newCluster(objs ...*unstructured.Unstructured) *clusterInfo {
|
||||
|
||||
func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
|
||||
return &clusterInfo{
|
||||
lock: &sync.Mutex{},
|
||||
lock: &sync.RWMutex{},
|
||||
nodes: make(map[kube.ResourceKey]*node),
|
||||
onObjectUpdated: func(managedByApp map[string]bool, reference corev1.ObjectReference) {},
|
||||
kubectl: kubectl,
|
||||
@@ -322,7 +322,7 @@ metadata:
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
}, []*unstructured.Unstructured{targetDeploy}, nil)
|
||||
}, []*unstructured.Unstructured{targetDeploy})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, managedObjs, map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.NewResourceKey("apps", "Deployment", "default", "helm-guestbook"): testDeploy,
|
||||
@@ -504,7 +504,8 @@ func TestWatchCacheUpdated(t *testing.T) {
|
||||
|
||||
podGroupKind := testPod.GroupVersionKind().GroupKind()
|
||||
|
||||
cluster.replaceResourceCache(podGroupKind, "updated-list-version", []unstructured.Unstructured{*updated, *added})
|
||||
cluster.lock.Lock()
|
||||
cluster.replaceResourceCache(podGroupKind, "updated-list-version", []unstructured.Unstructured{*updated, *added}, "")
|
||||
|
||||
_, ok := cluster.nodes[kube.GetResourceKey(removed)]
|
||||
assert.False(t, ok)
|
||||
@@ -515,6 +516,29 @@ func TestWatchCacheUpdated(t *testing.T) {
|
||||
|
||||
_, ok = cluster.nodes[kube.GetResourceKey(added)]
|
||||
assert.True(t, ok)
|
||||
cluster.lock.Unlock()
|
||||
}
|
||||
|
||||
func TestNamespaceModeReplace(t *testing.T) {
|
||||
ns1Pod := testPod.DeepCopy()
|
||||
ns1Pod.SetNamespace("ns1")
|
||||
ns1Pod.SetName("pod1")
|
||||
|
||||
ns2Pod := testPod.DeepCopy()
|
||||
ns2Pod.SetNamespace("ns2")
|
||||
podGroupKind := testPod.GroupVersionKind().GroupKind()
|
||||
|
||||
cluster := newCluster(ns1Pod, ns2Pod)
|
||||
err := cluster.ensureSynced()
|
||||
assert.Nil(t, err)
|
||||
|
||||
cluster.replaceResourceCache(podGroupKind, "", nil, "ns1")
|
||||
|
||||
_, ok := cluster.nodes[kube.GetResourceKey(ns1Pod)]
|
||||
assert.False(t, ok)
|
||||
|
||||
_, ok = cluster.nodes[kube.GetResourceKey(ns2Pod)]
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestGetDuplicatedChildren(t *testing.T) {
|
||||
|
||||
23
controller/cache/mocks/LiveStateCache.go
vendored
23
controller/cache/mocks/LiveStateCache.go
vendored
@@ -14,6 +14,8 @@ import (
|
||||
|
||||
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -84,8 +86,8 @@ func (_m *LiveStateCache) GetNamespaceTopLevelResources(server string, namespace
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// GetServerVersion provides a mock function with given fields: serverURL
|
||||
func (_m *LiveStateCache) GetServerVersion(serverURL string) (string, error) {
|
||||
// GetVersionsInfo provides a mock function with given fields: serverURL
|
||||
func (_m *LiveStateCache) GetVersionsInfo(serverURL string) (string, []v1.APIGroup, error) {
|
||||
ret := _m.Called(serverURL)
|
||||
|
||||
var r0 string
|
||||
@@ -95,14 +97,23 @@ func (_m *LiveStateCache) GetServerVersion(serverURL string) (string, error) {
|
||||
r0 = ret.Get(0).(string)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||
var r1 []v1.APIGroup
|
||||
if rf, ok := ret.Get(1).(func(string) []v1.APIGroup); ok {
|
||||
r1 = rf(serverURL)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).([]v1.APIGroup)
|
||||
}
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
var r2 error
|
||||
if rf, ok := ret.Get(2).(func(string) error); ok {
|
||||
r2 = rf(serverURL)
|
||||
} else {
|
||||
r2 = ret.Error(2)
|
||||
}
|
||||
|
||||
return r0, r1, r2
|
||||
}
|
||||
|
||||
// Invalidate provides a mock function with given fields:
|
||||
|
||||
@@ -3,6 +3,7 @@ package metrics
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@@ -24,13 +25,17 @@ type MetricsServer struct {
|
||||
kubectlExecPendingGauge *prometheus.GaugeVec
|
||||
k8sRequestCounter *prometheus.CounterVec
|
||||
clusterEventsCounter *prometheus.CounterVec
|
||||
redisRequestCounter *prometheus.CounterVec
|
||||
reconcileHistogram *prometheus.HistogramVec
|
||||
redisRequestHistogram *prometheus.HistogramVec
|
||||
registry *prometheus.Registry
|
||||
}
|
||||
|
||||
const (
|
||||
// MetricsPath is the endpoint to collect application metrics
|
||||
MetricsPath = "/metrics"
|
||||
// EnvVarLegacyControllerMetrics is a env var to re-enable deprecated prometheus metrics
|
||||
EnvVarLegacyControllerMetrics = "ARGOCD_LEGACY_CONTROLLER_METRICS"
|
||||
)
|
||||
|
||||
// Follow Prometheus naming practices
|
||||
@@ -41,27 +46,88 @@ var (
|
||||
descAppInfo = prometheus.NewDesc(
|
||||
"argocd_app_info",
|
||||
"Information about application.",
|
||||
append(descAppDefaultLabels, "repo", "dest_server", "dest_namespace"),
|
||||
append(descAppDefaultLabels, "repo", "dest_server", "dest_namespace", "sync_status", "health_status", "operation"),
|
||||
nil,
|
||||
)
|
||||
// DEPRECATED
|
||||
descAppCreated = prometheus.NewDesc(
|
||||
"argocd_app_created_time",
|
||||
"Creation time in unix timestamp for an application.",
|
||||
descAppDefaultLabels,
|
||||
nil,
|
||||
)
|
||||
// DEPRECATED: superceded by sync_status label in argocd_app_info
|
||||
descAppSyncStatusCode = prometheus.NewDesc(
|
||||
"argocd_app_sync_status",
|
||||
"The application current sync status.",
|
||||
append(descAppDefaultLabels, "sync_status"),
|
||||
nil,
|
||||
)
|
||||
// DEPRECATED: superceded by health_status label in argocd_app_info
|
||||
descAppHealthStatus = prometheus.NewDesc(
|
||||
"argocd_app_health_status",
|
||||
"The application current health status.",
|
||||
append(descAppDefaultLabels, "health_status"),
|
||||
nil,
|
||||
)
|
||||
|
||||
syncCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_app_sync_total",
|
||||
Help: "Number of application syncs.",
|
||||
},
|
||||
append(descAppDefaultLabels, "dest_server", "phase"),
|
||||
)
|
||||
|
||||
k8sRequestCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_app_k8s_request_total",
|
||||
Help: "Number of kubernetes requests executed during application reconciliation.",
|
||||
},
|
||||
append(descAppDefaultLabels, "server", "response_code", "verb", "resource_kind", "resource_namespace"),
|
||||
)
|
||||
|
||||
kubectlExecCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "argocd_kubectl_exec_total",
|
||||
Help: "Number of kubectl executions",
|
||||
}, []string{"command"})
|
||||
|
||||
kubectlExecPendingGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "argocd_kubectl_exec_pending",
|
||||
Help: "Number of pending kubectl executions",
|
||||
}, []string{"command"})
|
||||
|
||||
reconcileHistogram = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_app_reconcile",
|
||||
Help: "Application reconciliation performance.",
|
||||
// Buckets chosen after observing a ~2100ms mean reconcile time
|
||||
Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
|
||||
},
|
||||
[]string{"namespace", "dest_server"},
|
||||
)
|
||||
|
||||
clusterEventsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "argocd_cluster_events_total",
|
||||
Help: "Number of processes k8s resource events.",
|
||||
}, append(descClusterDefaultLabels, "group", "kind"))
|
||||
|
||||
redisRequestCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_redis_request_total",
|
||||
Help: "Number of kubernetes requests executed during application reconciliation.",
|
||||
},
|
||||
[]string{"initiator", "failed"},
|
||||
)
|
||||
|
||||
redisRequestHistogram = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_redis_request_duration",
|
||||
Help: "Redis requests duration.",
|
||||
Buckets: []float64{0.01, 0.05, 0.10, 0.25, .5, 1},
|
||||
},
|
||||
[]string{"initiator"},
|
||||
)
|
||||
)
|
||||
|
||||
// NewMetricsServer returns a new prometheus server which collects application metrics
|
||||
@@ -76,51 +142,14 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, health
|
||||
}, promhttp.HandlerOpts{}))
|
||||
healthz.ServeHealthCheck(mux, healthCheck)
|
||||
|
||||
syncCounter := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_app_sync_total",
|
||||
Help: "Number of application syncs.",
|
||||
},
|
||||
append(descAppDefaultLabels, "phase"),
|
||||
)
|
||||
registry.MustRegister(syncCounter)
|
||||
|
||||
k8sRequestCounter := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_app_k8s_request_total",
|
||||
Help: "Number of kubernetes requests executed during application reconciliation.",
|
||||
},
|
||||
append(descAppDefaultLabels, "response_code"),
|
||||
)
|
||||
registry.MustRegister(k8sRequestCounter)
|
||||
|
||||
kubectlExecCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "argocd_kubectl_exec_total",
|
||||
Help: "Number of kubectl executions",
|
||||
}, []string{"command"})
|
||||
registry.MustRegister(kubectlExecCounter)
|
||||
kubectlExecPendingGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "argocd_kubectl_exec_pending_total",
|
||||
Help: "Number of pending kubectl executions",
|
||||
}, []string{"command"})
|
||||
registry.MustRegister(kubectlExecPendingGauge)
|
||||
|
||||
reconcileHistogram := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_app_reconcile",
|
||||
Help: "Application reconciliation performance.",
|
||||
// Buckets chosen after observing a ~2100ms mean reconcile time
|
||||
Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
|
||||
},
|
||||
descAppDefaultLabels,
|
||||
)
|
||||
|
||||
registry.MustRegister(reconcileHistogram)
|
||||
clusterEventsCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "argocd_cluster_events_total",
|
||||
Help: "Number of processes k8s resource events.",
|
||||
}, descClusterDefaultLabels)
|
||||
registry.MustRegister(clusterEventsCounter)
|
||||
registry.MustRegister(redisRequestCounter)
|
||||
registry.MustRegister(redisRequestHistogram)
|
||||
|
||||
return &MetricsServer{
|
||||
registry: registry,
|
||||
@@ -134,6 +163,8 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, health
|
||||
kubectlExecPendingGauge: kubectlExecPendingGauge,
|
||||
reconcileHistogram: reconcileHistogram,
|
||||
clusterEventsCounter: clusterEventsCounter,
|
||||
redisRequestCounter: redisRequestCounter,
|
||||
redisRequestHistogram: redisRequestHistogram,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,7 +179,7 @@ func (m *MetricsServer) IncSync(app *argoappv1.Application, state *argoappv1.Ope
|
||||
if !state.Phase.Completed() {
|
||||
return
|
||||
}
|
||||
m.syncCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), string(state.Phase)).Inc()
|
||||
m.syncCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), app.Spec.Destination.Server, string(state.Phase)).Inc()
|
||||
}
|
||||
|
||||
func (m *MetricsServer) IncKubectlExec(command string) {
|
||||
@@ -164,18 +195,36 @@ func (m *MetricsServer) DecKubectlExecPending(command string) {
|
||||
}
|
||||
|
||||
// IncClusterEventsCount increments the number of cluster events
|
||||
func (m *MetricsServer) IncClusterEventsCount(server string) {
|
||||
m.clusterEventsCounter.WithLabelValues(server).Inc()
|
||||
func (m *MetricsServer) IncClusterEventsCount(server, group, kind string) {
|
||||
m.clusterEventsCounter.WithLabelValues(server, group, kind).Inc()
|
||||
}
|
||||
|
||||
// IncKubernetesRequest increments the kubernetes requests counter for an application
|
||||
func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, statusCode int) {
|
||||
m.k8sRequestCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), strconv.Itoa(statusCode)).Inc()
|
||||
func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, server, statusCode, verb, resourceKind, resourceNamespace string) {
|
||||
var namespace, name, project string
|
||||
if app != nil {
|
||||
namespace = app.Namespace
|
||||
name = app.Name
|
||||
project = app.Spec.GetProject()
|
||||
}
|
||||
m.k8sRequestCounter.WithLabelValues(
|
||||
namespace, name, project, server, statusCode,
|
||||
verb, resourceKind, resourceNamespace,
|
||||
).Inc()
|
||||
}
|
||||
|
||||
func (m *MetricsServer) IncRedisRequest(failed bool) {
|
||||
m.redisRequestCounter.WithLabelValues("argocd-application-controller", strconv.FormatBool(failed)).Inc()
|
||||
}
|
||||
|
||||
// ObserveRedisRequestDuration observes redis request duration
|
||||
func (m *MetricsServer) ObserveRedisRequestDuration(duration time.Duration) {
|
||||
m.redisRequestHistogram.WithLabelValues("argocd-application-controller").Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
// IncReconcile increments the reconcile counter for an application
|
||||
func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.Duration) {
|
||||
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject()).Observe(duration.Seconds())
|
||||
m.reconcileHistogram.WithLabelValues(app.Namespace, app.Spec.Destination.Server).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
type appCollector struct {
|
||||
@@ -199,7 +248,6 @@ func NewAppRegistry(appLister applister.ApplicationLister) *prometheus.Registry
|
||||
// Describe implements the prometheus.Collector interface
|
||||
func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- descAppInfo
|
||||
ch <- descAppCreated
|
||||
ch <- descAppSyncStatusCode
|
||||
ch <- descAppHealthStatus
|
||||
}
|
||||
@@ -233,20 +281,36 @@ func collectApps(ch chan<- prometheus.Metric, app *argoappv1.Application) {
|
||||
addConstMetric(desc, prometheus.GaugeValue, v, lv...)
|
||||
}
|
||||
|
||||
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace)
|
||||
|
||||
addGauge(descAppCreated, float64(app.CreationTimestamp.Unix()))
|
||||
|
||||
var operation string
|
||||
if app.DeletionTimestamp != nil {
|
||||
operation = "delete"
|
||||
} else if app.Operation != nil && app.Operation.Sync != nil {
|
||||
operation = "sync"
|
||||
}
|
||||
syncStatus := app.Status.Sync.Status
|
||||
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeSynced), string(argoappv1.SyncStatusCodeSynced))
|
||||
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeOutOfSync), string(argoappv1.SyncStatusCodeOutOfSync))
|
||||
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeUnknown || syncStatus == ""), string(argoappv1.SyncStatusCodeUnknown))
|
||||
|
||||
if syncStatus == "" {
|
||||
syncStatus = argoappv1.SyncStatusCodeUnknown
|
||||
}
|
||||
healthStatus := app.Status.Health.Status
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusUnknown || healthStatus == ""), argoappv1.HealthStatusUnknown)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusProgressing), argoappv1.HealthStatusProgressing)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusSuspended), argoappv1.HealthStatusSuspended)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusHealthy), argoappv1.HealthStatusHealthy)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusDegraded), argoappv1.HealthStatusDegraded)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusMissing), argoappv1.HealthStatusMissing)
|
||||
if healthStatus == "" {
|
||||
healthStatus = argoappv1.HealthStatusUnknown
|
||||
}
|
||||
|
||||
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), healthStatus, operation)
|
||||
|
||||
// Deprecated controller metrics
|
||||
if os.Getenv(EnvVarLegacyControllerMetrics) == "true" {
|
||||
addGauge(descAppCreated, float64(app.CreationTimestamp.Unix()))
|
||||
|
||||
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeSynced), string(argoappv1.SyncStatusCodeSynced))
|
||||
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeOutOfSync), string(argoappv1.SyncStatusCodeOutOfSync))
|
||||
addGauge(descAppSyncStatusCode, boolFloat64(syncStatus == argoappv1.SyncStatusCodeUnknown || syncStatus == ""), string(argoappv1.SyncStatusCodeUnknown))
|
||||
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusUnknown || healthStatus == ""), argoappv1.HealthStatusUnknown)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusProgressing), argoappv1.HealthStatusProgressing)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusSuspended), argoappv1.HealthStatusSuspended)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusHealthy), argoappv1.HealthStatusHealthy)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusDegraded), argoappv1.HealthStatusDegraded)
|
||||
addGauge(descAppHealthStatus, boolFloat64(healthStatus == argoappv1.HealthStatusMissing), argoappv1.HealthStatusMissing)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -42,25 +43,52 @@ status:
|
||||
status: Healthy
|
||||
`
|
||||
|
||||
const expectedResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
|
||||
# TYPE argocd_app_created_time gauge
|
||||
argocd_app_created_time{name="my-app",namespace="argocd",project="important-project"} -6.21355968e+10
|
||||
# HELP argocd_app_health_status The application current health status.
|
||||
# TYPE argocd_app_health_status gauge
|
||||
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="important-project"} 1
|
||||
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
# HELP argocd_app_info Information about application.
|
||||
# TYPE argocd_app_info gauge
|
||||
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="important-project",repo="https://github.com/argoproj/argocd-example-apps"} 1
|
||||
# HELP argocd_app_sync_status The application current sync status.
|
||||
# TYPE argocd_app_sync_status gauge
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="OutOfSync"} 0
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Synced"} 1
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Unknown"} 0
|
||||
const fakeApp2 = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: my-app-2
|
||||
namespace: argocd
|
||||
spec:
|
||||
destination:
|
||||
namespace: dummy-namespace
|
||||
server: https://localhost:6443
|
||||
project: important-project
|
||||
source:
|
||||
path: some/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
status:
|
||||
sync:
|
||||
status: Synced
|
||||
health:
|
||||
status: Healthy
|
||||
operation:
|
||||
sync:
|
||||
revision: 041eab7439ece92c99b043f0e171788185b8fc1d
|
||||
syncStrategy:
|
||||
hook: {}
|
||||
`
|
||||
|
||||
const fakeApp3 = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: my-app-3
|
||||
namespace: argocd
|
||||
deletionTimestamp: "2020-03-16T09:17:45Z"
|
||||
spec:
|
||||
destination:
|
||||
namespace: dummy-namespace
|
||||
server: https://localhost:6443
|
||||
project: important-project
|
||||
source:
|
||||
path: some/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
status:
|
||||
sync:
|
||||
status: OutOfSync
|
||||
health:
|
||||
status: Degraded
|
||||
`
|
||||
|
||||
const fakeDefaultApp = `
|
||||
@@ -83,46 +111,26 @@ status:
|
||||
status: Healthy
|
||||
`
|
||||
|
||||
const expectedDefaultResponse = `# HELP argocd_app_created_time Creation time in unix timestamp for an application.
|
||||
# TYPE argocd_app_created_time gauge
|
||||
argocd_app_created_time{name="my-app",namespace="argocd",project="default"} -6.21355968e+10
|
||||
# HELP argocd_app_health_status The application current health status.
|
||||
# TYPE argocd_app_health_status gauge
|
||||
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="default"} 0
|
||||
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="default"} 1
|
||||
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="default"} 0
|
||||
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="default"} 0
|
||||
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="default"} 0
|
||||
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="default"} 0
|
||||
# HELP argocd_app_info Information about application.
|
||||
# TYPE argocd_app_info gauge
|
||||
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",name="my-app",namespace="argocd",project="default",repo="https://github.com/argoproj/argocd-example-apps"} 1
|
||||
# HELP argocd_app_sync_status The application current sync status.
|
||||
# TYPE argocd_app_sync_status gauge
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="OutOfSync"} 0
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Synced"} 1
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Unknown"} 0
|
||||
`
|
||||
|
||||
var noOpHealthCheck = func() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newFakeApp(fakeApp string) *argoappv1.Application {
|
||||
func newFakeApp(fakeAppYAML string) *argoappv1.Application {
|
||||
var app argoappv1.Application
|
||||
err := yaml.Unmarshal([]byte(fakeApp), &app)
|
||||
err := yaml.Unmarshal([]byte(fakeAppYAML), &app)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &app
|
||||
}
|
||||
|
||||
func newFakeLister(fakeApp ...string) (context.CancelFunc, applister.ApplicationLister) {
|
||||
func newFakeLister(fakeAppYAMLs ...string) (context.CancelFunc, applister.ApplicationLister) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
var fakeApps []runtime.Object
|
||||
for _, name := range fakeApp {
|
||||
fakeApps = append(fakeApps, newFakeApp(name))
|
||||
for _, appYAML := range fakeAppYAMLs {
|
||||
a := newFakeApp(appYAML)
|
||||
fakeApps = append(fakeApps, a)
|
||||
}
|
||||
appClientset := appclientset.NewSimpleClientset(fakeApps...)
|
||||
factory := appinformer.NewFilteredSharedInformerFactory(appClientset, 0, "argocd", func(options *metav1.ListOptions) {})
|
||||
@@ -134,8 +142,8 @@ func newFakeLister(fakeApp ...string) (context.CancelFunc, applister.Application
|
||||
return cancel, factory.Argoproj().V1alpha1().Applications().Lister()
|
||||
}
|
||||
|
||||
func testApp(t *testing.T, fakeApp string, expectedResponse string) {
|
||||
cancel, appLister := newFakeLister(fakeApp)
|
||||
func testApp(t *testing.T, fakeAppYAMLs []string, expectedResponse string) {
|
||||
cancel, appLister := newFakeLister(fakeAppYAMLs...)
|
||||
defer cancel()
|
||||
metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
|
||||
req, err := http.NewRequest("GET", "/metrics", nil)
|
||||
@@ -149,39 +157,75 @@ func testApp(t *testing.T, fakeApp string, expectedResponse string) {
|
||||
}
|
||||
|
||||
type testCombination struct {
|
||||
application string
|
||||
applications []string
|
||||
expectedResponse string
|
||||
}
|
||||
|
||||
func TestMetrics(t *testing.T) {
|
||||
combinations := []testCombination{
|
||||
{
|
||||
application: fakeApp,
|
||||
expectedResponse: expectedResponse,
|
||||
applications: []string{fakeApp, fakeApp2, fakeApp3},
|
||||
expectedResponse: `
|
||||
# HELP argocd_app_info Information about application.
|
||||
# TYPE argocd_app_info gauge
|
||||
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Degraded",name="my-app-3",namespace="argocd",operation="delete",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="OutOfSync"} 1
|
||||
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app",namespace="argocd",operation="",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1
|
||||
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app-2",namespace="argocd",operation="sync",project="important-project",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1
|
||||
`,
|
||||
},
|
||||
{
|
||||
application: fakeDefaultApp,
|
||||
expectedResponse: expectedDefaultResponse,
|
||||
applications: []string{fakeDefaultApp},
|
||||
expectedResponse: `
|
||||
# HELP argocd_app_info Information about application.
|
||||
# TYPE argocd_app_info gauge
|
||||
argocd_app_info{dest_namespace="dummy-namespace",dest_server="https://localhost:6443",health_status="Healthy",name="my-app",namespace="argocd",operation="",project="default",repo="https://github.com/argoproj/argocd-example-apps",sync_status="Synced"} 1
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, combination := range combinations {
|
||||
testApp(t, combination.application, combination.expectedResponse)
|
||||
testApp(t, combination.applications, combination.expectedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
const appSyncTotal = `# HELP argocd_app_sync_total Number of application syncs.
|
||||
# TYPE argocd_app_sync_total counter
|
||||
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Error",project="important-project"} 1
|
||||
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Failed",project="important-project"} 1
|
||||
argocd_app_sync_total{name="my-app",namespace="argocd",phase="Succeeded",project="important-project"} 2
|
||||
func TestLegacyMetrics(t *testing.T) {
|
||||
os.Setenv(EnvVarLegacyControllerMetrics, "true")
|
||||
defer os.Unsetenv(EnvVarLegacyControllerMetrics)
|
||||
|
||||
expectedResponse := `
|
||||
# HELP argocd_app_created_time Creation time in unix timestamp for an application.
|
||||
# TYPE argocd_app_created_time gauge
|
||||
argocd_app_created_time{name="my-app",namespace="argocd",project="important-project"} -6.21355968e+10
|
||||
# HELP argocd_app_health_status The application current health status.
|
||||
# TYPE argocd_app_health_status gauge
|
||||
argocd_app_health_status{health_status="Degraded",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Healthy",name="my-app",namespace="argocd",project="important-project"} 1
|
||||
argocd_app_health_status{health_status="Missing",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Progressing",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Suspended",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
argocd_app_health_status{health_status="Unknown",name="my-app",namespace="argocd",project="important-project"} 0
|
||||
# HELP argocd_app_sync_status The application current sync status.
|
||||
# TYPE argocd_app_sync_status gauge
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="OutOfSync"} 0
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Synced"} 1
|
||||
argocd_app_sync_status{name="my-app",namespace="argocd",project="important-project",sync_status="Unknown"} 0
|
||||
`
|
||||
testApp(t, []string{fakeApp}, expectedResponse)
|
||||
}
|
||||
|
||||
func TestMetricsSyncCounter(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
|
||||
|
||||
appSyncTotal := `
|
||||
# HELP argocd_app_sync_total Number of application syncs.
|
||||
# TYPE argocd_app_sync_total counter
|
||||
argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespace="argocd",phase="Error",project="important-project"} 1
|
||||
argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespace="argocd",phase="Failed",project="important-project"} 1
|
||||
argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespace="argocd",phase="Succeeded",project="important-project"} 2
|
||||
`
|
||||
|
||||
fakeApp := newFakeApp(fakeApp)
|
||||
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationRunning})
|
||||
metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationFailed})
|
||||
@@ -202,27 +246,31 @@ func TestMetricsSyncCounter(t *testing.T) {
|
||||
// assertMetricsPrinted asserts every line in the expected lines appears in the body
|
||||
func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
|
||||
for _, line := range strings.Split(expectedLines, "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
assert.Contains(t, body, line)
|
||||
}
|
||||
}
|
||||
|
||||
const appReconcileMetrics = `argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="0.25"} 0
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="0.5"} 0
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="1"} 0
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="2"} 0
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="4"} 0
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="8"} 1
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="16"} 1
|
||||
argocd_app_reconcile_bucket{name="my-app",namespace="argocd",project="important-project",le="+Inf"} 1
|
||||
argocd_app_reconcile_sum{name="my-app",namespace="argocd",project="important-project"} 5
|
||||
argocd_app_reconcile_count{name="my-app",namespace="argocd",project="important-project"} 1
|
||||
`
|
||||
|
||||
func TestReconcileMetrics(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
|
||||
|
||||
appReconcileMetrics := `
|
||||
# HELP argocd_app_reconcile Application reconciliation performance.
|
||||
# TYPE argocd_app_reconcile histogram
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="0.25"} 0
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="0.5"} 0
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="1"} 0
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="2"} 0
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="4"} 0
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="8"} 1
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="16"} 1
|
||||
argocd_app_reconcile_bucket{dest_server="https://localhost:6443",namespace="argocd",le="+Inf"} 1
|
||||
argocd_app_reconcile_sum{dest_server="https://localhost:6443",namespace="argocd"} 5
|
||||
argocd_app_reconcile_count{dest_server="https://localhost:6443",namespace="argocd"} 1
|
||||
`
|
||||
fakeApp := newFakeApp(fakeApp)
|
||||
metricsServ.IncReconcile(fakeApp, 5*time.Second)
|
||||
|
||||
|
||||
@@ -1,37 +1,24 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/argoproj/pkg/kubeclientmetrics"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
type metricsRoundTripper struct {
|
||||
roundTripper http.RoundTripper
|
||||
app *v1alpha1.Application
|
||||
metricsServer *MetricsServer
|
||||
}
|
||||
|
||||
func (mrt *metricsRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
resp, err := mrt.roundTripper.RoundTrip(r)
|
||||
statusCode := 0
|
||||
if resp != nil {
|
||||
statusCode = resp.StatusCode
|
||||
}
|
||||
mrt.metricsServer.IncKubernetesRequest(mrt.app, statusCode)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// AddMetricsTransportWrapper adds a transport wrapper which increments 'argocd_app_k8s_request_total' counter on each kubernetes request
|
||||
func AddMetricsTransportWrapper(server *MetricsServer, app *v1alpha1.Application, config *rest.Config) *rest.Config {
|
||||
wrap := config.WrapTransport
|
||||
config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
|
||||
if wrap != nil {
|
||||
rt = wrap(rt)
|
||||
}
|
||||
return &metricsRoundTripper{roundTripper: rt, metricsServer: server, app: app}
|
||||
inc := func(resourceInfo kubeclientmetrics.ResourceInfo) error {
|
||||
namespace := resourceInfo.Namespace
|
||||
kind := resourceInfo.Kind
|
||||
statusCode := strconv.Itoa(resourceInfo.StatusCode)
|
||||
server.IncKubernetesRequest(app, resourceInfo.Server, statusCode, string(resourceInfo.Verb), kind, namespace)
|
||||
return nil
|
||||
}
|
||||
return config
|
||||
|
||||
newConfig := kubeclientmetrics.AddMetricsTransportWrapper(config, inc)
|
||||
return newConfig
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/yudai/gojsondiff"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/util/resource"
|
||||
"github.com/argoproj/argo-cd/util/resource/ignore"
|
||||
"github.com/argoproj/argo-cd/util/settings"
|
||||
"github.com/argoproj/argo-cd/util/stats"
|
||||
)
|
||||
|
||||
type managedResource struct {
|
||||
@@ -70,6 +72,8 @@ type comparisonResult struct {
|
||||
hooks []*unstructured.Unstructured
|
||||
diffNormalizer diff.Normalizer
|
||||
appSourceType v1alpha1.ApplicationSourceType
|
||||
// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
|
||||
timings map[string]time.Duration
|
||||
}
|
||||
|
||||
func (cr *comparisonResult) targetObjs() []*unstructured.Unstructured {
|
||||
@@ -96,14 +100,17 @@ type appStateManager struct {
|
||||
}
|
||||
|
||||
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, appLabelKey, revision string, noCache bool) ([]*unstructured.Unstructured, []*unstructured.Unstructured, *apiclient.ManifestResponse, error) {
|
||||
ts := stats.NewTimingStats()
|
||||
helmRepos, err := m.db.ListHelmRepositories(context.Background())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("helm_ms")
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("repo_ms")
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -118,20 +125,26 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("plugins_ms")
|
||||
tools := make([]*appv1.ConfigManagementPlugin, len(plugins))
|
||||
for i := range plugins {
|
||||
tools[i] = &plugins[i]
|
||||
}
|
||||
|
||||
buildOptions, err := m.settingsMgr.GetKustomizeBuildOptions()
|
||||
kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
serverVersion, err := m.liveStateCache.GetServerVersion(app.Spec.Destination.Server)
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(app.Spec.Source)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("build_options_ms")
|
||||
serverVersion, apiGroups, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("version_ms")
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: helmRepos,
|
||||
@@ -142,18 +155,25 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
Plugins: tools,
|
||||
KustomizeOptions: &appv1.KustomizeOptions{
|
||||
BuildOptions: buildOptions,
|
||||
},
|
||||
KubeVersion: serverVersion,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIGroupsToVersions(apiGroups),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("manifests_ms")
|
||||
targetObjs, hooks, err := unmarshalManifests(manifestInfo.Manifests)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("unmarshal_ms")
|
||||
logCtx := log.WithField("application", app.Name)
|
||||
for k, v := range ts.Timings() {
|
||||
logCtx = logCtx.WithField(k, v.Milliseconds())
|
||||
}
|
||||
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
|
||||
logCtx.Info("getRepoObjs stats")
|
||||
return targetObjs, hooks, manifestInfo, nil
|
||||
}
|
||||
|
||||
@@ -165,7 +185,7 @@ func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, []*un
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if ignore.Ignore(obj) {
|
||||
if obj == nil || ignore.Ignore(obj) {
|
||||
continue
|
||||
}
|
||||
if hookutil.IsHook(obj) {
|
||||
@@ -252,27 +272,33 @@ func dedupLiveResources(targetObjs []*unstructured.Unstructured, liveObjsByKey m
|
||||
}
|
||||
}
|
||||
|
||||
func (m *appStateManager) getComparisonSettings(app *appv1.Application) (string, map[string]v1alpha1.ResourceOverride, diff.Normalizer, error) {
|
||||
func (m *appStateManager) getComparisonSettings(app *appv1.Application) (string, map[string]v1alpha1.ResourceOverride, diff.Normalizer, *settings.ResourcesFilter, error) {
|
||||
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
diffNormalizer, err := argo.NewDiffNormalizer(app.Spec.IgnoreDifferences, resourceOverrides)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
return appLabelKey, resourceOverrides, diffNormalizer, nil
|
||||
resFilter, err := m.settingsMgr.GetResourcesFilter()
|
||||
if err != nil {
|
||||
return "", nil, nil, nil, err
|
||||
}
|
||||
return appLabelKey, resourceOverrides, diffNormalizer, resFilter, nil
|
||||
}
|
||||
|
||||
// CompareAppState compares application git state to the live app state, using the specified
|
||||
// revision and supplied source. If revision or overrides are empty, then compares against
|
||||
// revision and overrides in the app spec.
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, localManifests []string) *comparisonResult {
|
||||
appLabelKey, resourceOverrides, diffNormalizer, err := m.getComparisonSettings(app)
|
||||
ts := stats.NewTimingStats()
|
||||
appLabelKey, resourceOverrides, diffNormalizer, resFilter, err := m.getComparisonSettings(app)
|
||||
ts.AddCheckpoint("settings_ms")
|
||||
|
||||
// return unknown comparison result if basic comparison settings cannot be loaded
|
||||
if err != nil {
|
||||
@@ -313,32 +339,27 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
}
|
||||
manifestInfo = nil
|
||||
}
|
||||
ts.AddCheckpoint("git_ms")
|
||||
|
||||
targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Server, app.Spec.Destination.Namespace, targetObjs, m.liveStateCache)
|
||||
if err != nil {
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
}
|
||||
conditions = append(conditions, dedupConditions...)
|
||||
|
||||
resFilter, err := m.settingsMgr.GetResourcesFilter()
|
||||
if err != nil {
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
} else {
|
||||
for i := len(targetObjs) - 1; i >= 0; i-- {
|
||||
targetObj := targetObjs[i]
|
||||
gvk := targetObj.GroupVersionKind()
|
||||
if resFilter.IsExcludedResource(gvk.Group, gvk.Kind, app.Spec.Destination.Server) {
|
||||
targetObjs = append(targetObjs[:i], targetObjs[i+1:]...)
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{
|
||||
Type: v1alpha1.ApplicationConditionExcludedResourceWarning,
|
||||
Message: fmt.Sprintf("Resource %s/%s %s is excluded in the settings", gvk.Group, gvk.Kind, targetObj.GetName()),
|
||||
LastTransitionTime: &now,
|
||||
})
|
||||
}
|
||||
for i := len(targetObjs) - 1; i >= 0; i-- {
|
||||
targetObj := targetObjs[i]
|
||||
gvk := targetObj.GroupVersionKind()
|
||||
if resFilter.IsExcludedResource(gvk.Group, gvk.Kind, app.Spec.Destination.Server) {
|
||||
targetObjs = append(targetObjs[:i], targetObjs[i+1:]...)
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{
|
||||
Type: v1alpha1.ApplicationConditionExcludedResourceWarning,
|
||||
Message: fmt.Sprintf("Resource %s/%s %s is excluded in the settings", gvk.Group, gvk.Kind, targetObj.GetName()),
|
||||
LastTransitionTime: &now,
|
||||
})
|
||||
}
|
||||
}
|
||||
ts.AddCheckpoint("dedup_ms")
|
||||
|
||||
logCtx.Debugf("Generated config manifests")
|
||||
liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
|
||||
if err != nil {
|
||||
liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
|
||||
@@ -353,7 +374,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
}
|
||||
}
|
||||
|
||||
logCtx.Debugf("Retrieved lived manifests")
|
||||
for _, liveObj := range liveObjByKey {
|
||||
if liveObj != nil {
|
||||
appInstanceName := kubeutil.GetAppInstanceLabel(liveObj, appLabelKey)
|
||||
@@ -382,7 +402,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
managedLiveObj[i] = nil
|
||||
}
|
||||
}
|
||||
logCtx.Debugf("built managed objects list")
|
||||
ts.AddCheckpoint("live_ms")
|
||||
|
||||
// Everything remaining in liveObjByKey are "extra" resources that aren't tracked in git.
|
||||
// The following adds all the extras to the managedLiveObj list and backfills the targetObj
|
||||
// list with nils, so that the lists are of equal lengths for comparison purposes.
|
||||
@@ -398,6 +419,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
failedToLoadObjs = true
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
}
|
||||
ts.AddCheckpoint("diff_ms")
|
||||
|
||||
syncCode := v1alpha1.SyncStatusCodeSynced
|
||||
managedResources := make([]managedResource, len(targetObjs))
|
||||
@@ -423,7 +445,17 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
RequiresPruning: targetObj == nil && liveObj != nil,
|
||||
}
|
||||
|
||||
diffResult := diffResults.Diffs[i]
|
||||
var diffResult diff.DiffResult
|
||||
if i < len(diffResults.Diffs) {
|
||||
diffResult = diffResults.Diffs[i]
|
||||
} else {
|
||||
diffResult = diff.DiffResult{
|
||||
Diff: gojsondiff.New().CompareObjects(map[string]interface{}{}, map[string]interface{}{}),
|
||||
Modified: false,
|
||||
NormalizedLive: []byte("{}"),
|
||||
PredictedLive: []byte("{}"),
|
||||
}
|
||||
}
|
||||
if resState.Hook || ignore.Ignore(obj) {
|
||||
// For resource hooks, don't store sync status, and do not affect overall sync status
|
||||
} else if diffResult.Modified || targetObj == nil || liveObj == nil {
|
||||
@@ -477,6 +509,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
if manifestInfo != nil {
|
||||
syncStatus.Revision = manifestInfo.Revision
|
||||
}
|
||||
ts.AddCheckpoint("sync_ms")
|
||||
|
||||
healthStatus, err := health.SetApplicationHealth(resourceSummaries, GetLiveObjs(managedResources), resourceOverrides, func(obj *unstructured.Unstructured) bool {
|
||||
return !isSelfReferencedApp(app, kubeutil.GetObjectRef(obj))
|
||||
@@ -503,6 +536,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
appv1.ApplicationConditionRepeatedResourceWarning: true,
|
||||
appv1.ApplicationConditionExcludedResourceWarning: true,
|
||||
})
|
||||
ts.AddCheckpoint("health_ms")
|
||||
compRes.timings = ts.Timings()
|
||||
return &compRes
|
||||
}
|
||||
|
||||
|
||||
@@ -47,6 +47,7 @@ type syncContext struct {
|
||||
proj *v1alpha1.AppProject
|
||||
compareResult *comparisonResult
|
||||
config *rest.Config
|
||||
rawConfig *rest.Config
|
||||
dynamicIf dynamic.Interface
|
||||
disco discovery.DiscoveryInterface
|
||||
extensionsclientset *clientset.Clientset
|
||||
@@ -173,6 +174,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
proj: proj,
|
||||
compareResult: compareResult,
|
||||
config: restConfig,
|
||||
rawConfig: clst.RawRestConfig(),
|
||||
dynamicIf: dynamicIf,
|
||||
disco: disco,
|
||||
extensionsclientset: extensionsclientset,
|
||||
@@ -422,7 +424,13 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
|
||||
// metadata.generateName, then we will generate a formulated metadata.name before submission.
|
||||
targetObj := obj.DeepCopy()
|
||||
if targetObj.GetName() == "" {
|
||||
postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", sc.syncRes.Revision[0:7], phase, sc.opState.StartedAt.UTC().Unix()))
|
||||
var syncRevision string
|
||||
if len(sc.syncRes.Revision) >= 8 {
|
||||
syncRevision = sc.syncRes.Revision[0:7]
|
||||
} else {
|
||||
syncRevision = sc.syncRes.Revision
|
||||
}
|
||||
postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", syncRevision, phase, sc.opState.StartedAt.UTC().Unix()))
|
||||
generateName := obj.GetGenerateName()
|
||||
targetObj.SetName(fmt.Sprintf("%s%s", generateName, postfix))
|
||||
}
|
||||
@@ -476,9 +484,11 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
|
||||
serverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, task.groupVersionKind())
|
||||
if err != nil {
|
||||
// Special case for custom resources: if CRD is not yet known by the K8s API server,
|
||||
// skip verification during `kubectl apply --dry-run` since we expect the CRD
|
||||
// and the CRD is part of this sync or the resource is annotated with SkipDryRunOnMissingResource=true,
|
||||
// then skip verification during `kubectl apply --dry-run` since we expect the CRD
|
||||
// to be created during app synchronization.
|
||||
if apierr.IsNotFound(err) && sc.hasCRDOfGroupKind(task.group(), task.kind()) {
|
||||
skipDryRunOnMissingResource := resource.HasAnnotationOption(task.targetObj, common.AnnotationSyncOptions, "SkipDryRunOnMissingResource=true")
|
||||
if apierr.IsNotFound(err) && (skipDryRunOnMissingResource || sc.hasCRDOfGroupKind(task.group(), task.kind())) {
|
||||
sc.log.WithFields(log.Fields{"task": task}).Debug("skip dry-run for custom resource")
|
||||
task.skipDryRun = true
|
||||
} else {
|
||||
@@ -548,9 +558,8 @@ func (sc *syncContext) ensureCRDReady(name string) {
|
||||
}
|
||||
|
||||
// applyObject performs a `kubectl apply` of a single resource
|
||||
func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun bool, force bool) (v1alpha1.ResultCode, string) {
|
||||
validate := !resource.HasAnnotationOption(targetObj, common.AnnotationSyncOptions, "Validate=false")
|
||||
message, err := sc.kubectl.ApplyResource(sc.config, targetObj, targetObj.GetNamespace(), dryRun, force, validate)
|
||||
func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun, force, validate bool) (v1alpha1.ResultCode, string) {
|
||||
message, err := sc.kubectl.ApplyResource(sc.rawConfig, targetObj, targetObj.GetNamespace(), dryRun, force, validate)
|
||||
if err != nil {
|
||||
return v1alpha1.ResultCodeSyncFailed, err.Error()
|
||||
}
|
||||
@@ -562,10 +571,10 @@ func (sc *syncContext) applyObject(targetObj *unstructured.Unstructured, dryRun
|
||||
|
||||
// pruneObject deletes the object if both prune is true and dryRun is false. Otherwise appropriate message
|
||||
func (sc *syncContext) pruneObject(liveObj *unstructured.Unstructured, prune, dryRun bool) (v1alpha1.ResultCode, string) {
|
||||
if !prune {
|
||||
return v1alpha1.ResultCodePruneSkipped, "ignored (requires pruning)"
|
||||
} else if resource.HasAnnotationOption(liveObj, common.AnnotationSyncOptions, "Prune=false") {
|
||||
if resource.HasAnnotationOption(liveObj, common.AnnotationSyncOptions, "Prune=false") {
|
||||
return v1alpha1.ResultCodePruneSkipped, "ignored (no prune)"
|
||||
} else if !prune {
|
||||
return v1alpha1.ResultCodePruneSkipped, "ignored (requires pruning)"
|
||||
} else {
|
||||
if dryRun {
|
||||
return v1alpha1.ResultCodePruned, "pruned (dry run)"
|
||||
@@ -750,7 +759,8 @@ func (sc *syncContext) runTasks(tasks syncTasks, dryRun bool) runState {
|
||||
defer createWg.Done()
|
||||
logCtx := sc.log.WithFields(log.Fields{"dryRun": dryRun, "task": t})
|
||||
logCtx.Debug("applying")
|
||||
result, message := sc.applyObject(t.targetObj, dryRun, sc.syncOp.SyncStrategy.Force())
|
||||
validate := !(sc.syncOp.SyncOptions.HasOption("Validate=false") || resource.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, "Validate=false"))
|
||||
result, message := sc.applyObject(t.targetObj, dryRun, sc.syncOp.SyncStrategy.Force(), validate)
|
||||
if result == v1alpha1.ResultCodeSyncFailed {
|
||||
logCtx.WithField("message", message).Info("apply failed")
|
||||
runState = failed
|
||||
|
||||
@@ -44,6 +44,7 @@ func newTestSyncCtx(resources ...*v1.APIResourceList) *syncContext {
|
||||
})
|
||||
sc := syncContext{
|
||||
config: &rest.Config{},
|
||||
rawConfig: &rest.Config{},
|
||||
namespace: test.FakeArgoCDNamespace,
|
||||
server: test.FakeClusterURL,
|
||||
syncRes: &v1alpha1.SyncOperationResult{
|
||||
@@ -365,6 +366,20 @@ func TestSyncOptionValidate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// make sure Validate means we don't validate
|
||||
func TestSyncValidate(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod := test.NewPod()
|
||||
pod.SetNamespace(test.FakeArgoCDNamespace)
|
||||
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Target: pod, Live: pod}}}
|
||||
syncCtx.syncOp.SyncOptions = SyncOptions{"Validate=false"}
|
||||
|
||||
syncCtx.sync()
|
||||
|
||||
kubectl := syncCtx.kubectl.(*kubetest.MockKubectlCmd)
|
||||
assert.False(t, kubectl.LastValidate)
|
||||
}
|
||||
|
||||
func TestSelectiveSyncOnly(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod1 := test.NewPod()
|
||||
@@ -384,20 +399,40 @@ func TestSelectiveSyncOnly(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnnamedHooksGetUniqueNames(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
syncCtx.syncOp.SyncStrategy.Apply = nil
|
||||
pod := test.NewPod()
|
||||
pod.SetName("")
|
||||
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
|
||||
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
|
||||
t.Run("Truncated revision", func(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
syncCtx.syncOp.SyncStrategy.Apply = nil
|
||||
pod := test.NewPod()
|
||||
pod.SetName("")
|
||||
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
|
||||
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 2)
|
||||
assert.Contains(t, tasks[0].name(), "foobarb-presync-")
|
||||
assert.Contains(t, tasks[1].name(), "foobarb-postsync-")
|
||||
assert.Equal(t, "", pod.GetName())
|
||||
})
|
||||
|
||||
t.Run("Short revision", func(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
syncCtx.syncOp.SyncStrategy.Apply = nil
|
||||
pod := test.NewPod()
|
||||
pod.SetName("")
|
||||
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
|
||||
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
|
||||
syncCtx.syncRes.Revision = "foobar"
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 2)
|
||||
assert.Contains(t, tasks[0].name(), "foobar-presync-")
|
||||
assert.Contains(t, tasks[1].name(), "foobar-postsync-")
|
||||
assert.Equal(t, "", pod.GetName())
|
||||
})
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 2)
|
||||
assert.Contains(t, tasks[0].name(), "foobarb-presync-")
|
||||
assert.Contains(t, tasks[1].name(), "foobarb-postsync-")
|
||||
assert.Equal(t, "", pod.GetName())
|
||||
}
|
||||
|
||||
func TestManagedResourceAreNotNamed(t *testing.T) {
|
||||
@@ -443,6 +478,101 @@ func TestObjectsGetANamespace(t *testing.T) {
|
||||
assert.Equal(t, "", pod.GetNamespace())
|
||||
}
|
||||
|
||||
func TestSyncCustomResources(t *testing.T) {
|
||||
type fields struct {
|
||||
skipDryRunAnnotationPresent bool
|
||||
crdAlreadyPresent bool
|
||||
crdInSameSync bool
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
wantDryRun bool
|
||||
wantSuccess bool
|
||||
}{
|
||||
|
||||
{"unknown crd", fields{
|
||||
skipDryRunAnnotationPresent: false, crdAlreadyPresent: false, crdInSameSync: false,
|
||||
}, true, false},
|
||||
{"crd present in same sync", fields{
|
||||
skipDryRunAnnotationPresent: false, crdAlreadyPresent: false, crdInSameSync: true,
|
||||
}, false, true},
|
||||
{"crd is already present in cluster", fields{
|
||||
skipDryRunAnnotationPresent: false, crdAlreadyPresent: true, crdInSameSync: false,
|
||||
}, true, true},
|
||||
{"crd is already present in cluster, skip dry run annotated", fields{
|
||||
skipDryRunAnnotationPresent: true, crdAlreadyPresent: true, crdInSameSync: false,
|
||||
}, true, true},
|
||||
{"unknown crd, skip dry run annotated", fields{
|
||||
skipDryRunAnnotationPresent: true, crdAlreadyPresent: false, crdInSameSync: false,
|
||||
}, false, true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
knownCustomResourceTypes := []v1.APIResource{}
|
||||
if tt.fields.crdAlreadyPresent {
|
||||
knownCustomResourceTypes = append(knownCustomResourceTypes, v1.APIResource{Kind: "TestCrd", Group: "argoproj.io", Version: "v1", Namespaced: true})
|
||||
}
|
||||
|
||||
syncCtx := newTestSyncCtx(
|
||||
&v1.APIResourceList{
|
||||
GroupVersion: "argoproj.io/v1",
|
||||
APIResources: knownCustomResourceTypes,
|
||||
},
|
||||
&v1.APIResourceList{
|
||||
GroupVersion: "apiextensions.k8s.io/v1beta1",
|
||||
APIResources: []v1.APIResource{
|
||||
{Kind: "CustomResourceDefinition", Group: "apiextensions.k8s.io", Version: "v1beta1", Namespaced: true},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
cr := test.Unstructured(`
|
||||
{
|
||||
"apiVersion": "argoproj.io/v1",
|
||||
"kind": "TestCrd",
|
||||
"metadata": {
|
||||
"name": "my-resource"
|
||||
}
|
||||
}
|
||||
`)
|
||||
|
||||
if tt.fields.skipDryRunAnnotationPresent {
|
||||
cr.SetAnnotations(map[string]string{common.AnnotationSyncOptions: "SkipDryRunOnMissingResource=true"})
|
||||
}
|
||||
|
||||
resources := []managedResource{{Target: cr}}
|
||||
if tt.fields.crdInSameSync {
|
||||
resources = append(resources, managedResource{Target: test.NewCRD()})
|
||||
}
|
||||
|
||||
syncCtx.compareResult = &comparisonResult{managedResources: resources}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
if successful != tt.wantSuccess {
|
||||
t.Errorf("successful = %v, want: %v", successful, tt.wantSuccess)
|
||||
return
|
||||
}
|
||||
|
||||
skipDryRun := false
|
||||
for _, task := range tasks {
|
||||
if task.targetObj.GetKind() == cr.GetKind() {
|
||||
skipDryRun = task.skipDryRun
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if tt.wantDryRun != !skipDryRun {
|
||||
t.Errorf("dryRun = %v, want: %v", !skipDryRun, tt.wantDryRun)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestPersistRevisionHistory(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = nil
|
||||
|
||||
@@ -1,167 +1 @@
|
||||
# Contributing
|
||||
|
||||
## Before You Start
|
||||
|
||||
You must install and run the ArgoCD using a local Kubernetes (e.g. Docker for Desktop or Minikube) first. This will help you understand the application, but also get your local environment set-up.
|
||||
|
||||
Then, to get a good grounding in Go, try out [the tutorial](https://tour.golang.org/).
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
Install:
|
||||
|
||||
* [docker](https://docs.docker.com/install/#supported-platforms)
|
||||
* [git](https://git-scm.com/) and [git-lfs](https://git-lfs.github.com/)
|
||||
* [golang](https://golang.org/)
|
||||
* [dep](https://github.com/golang/dep)
|
||||
* [ksonnet](https://github.com/ksonnet/ksonnet#install)
|
||||
* [helm](https://github.com/helm/helm/releases)
|
||||
* [kustomize](https://github.com/kubernetes-sigs/kustomize/releases)
|
||||
* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
|
||||
* [kubectx](https://kubectx.dev)
|
||||
* [minikube](https://kubernetes.io/docs/setup/minikube/) or Docker for Desktop
|
||||
|
||||
Brew users can quickly install the lot:
|
||||
|
||||
```bash
|
||||
brew install go git-lfs kubectl kubectx dep ksonnet/tap/ks kubernetes-helm kustomize
|
||||
```
|
||||
|
||||
Check the versions:
|
||||
|
||||
```bash
|
||||
go version ;# must be v1.12.x
|
||||
helm version ;# must be v2.13.x
|
||||
kustomize version ;# must be v3.1.x
|
||||
```
|
||||
|
||||
Set up environment variables (e.g. in `~/.bashrc`):
|
||||
|
||||
```bash
|
||||
export GOPATH=~/go
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
```
|
||||
|
||||
Checkout the code:
|
||||
|
||||
```bash
|
||||
go get -u github.com/argoproj/argo-cd
|
||||
cd ~/go/src/github.com/argoproj/argo-cd
|
||||
```
|
||||
|
||||
## Building
|
||||
|
||||
Ensure dependencies are up to date first:
|
||||
|
||||
```shell
|
||||
dep ensure
|
||||
make dev-tools-image
|
||||
make install-lint-tools
|
||||
go get github.com/mattn/goreman
|
||||
go get github.com/jstemmer/go-junit-report
|
||||
```
|
||||
|
||||
Common make targets:
|
||||
|
||||
* `make codegen` - Run code generation
|
||||
* `make lint` - Lint code
|
||||
* `make test` - Run unit tests
|
||||
* `make cli` - Make the `argocd` CLI tool
|
||||
|
||||
Check out the following [documentation](https://github.com/argoproj/argo-cd/blob/master/docs/developer-guide/test-e2e.md) for instructions on running the e2e tests.
|
||||
|
||||
## Running Locally
|
||||
|
||||
It is much easier to run and debug if you run ArgoCD on your local machine than in the Kubernetes cluster.
|
||||
|
||||
You should scale the deployments to zero:
|
||||
|
||||
```bash
|
||||
kubectl -n argocd scale deployment/argocd-application-controller --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-dex-server --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-repo-server --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-server --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-redis --replicas 0
|
||||
```
|
||||
|
||||
Download Yarn dependencies and Compile:
|
||||
|
||||
```bash
|
||||
cd ~/go/src/github.com/argoproj/argo-cd/ui
|
||||
yarn install
|
||||
yarn build
|
||||
```
|
||||
|
||||
Then start the services:
|
||||
|
||||
```bash
|
||||
cd ~/go/src/github.com/argoproj/argo-cd
|
||||
make start
|
||||
```
|
||||
|
||||
You can now execute `argocd` command against your locally running ArgoCD by appending `--server localhost:8080 --plaintext --insecure`, e.g.:
|
||||
|
||||
```bash
|
||||
argocd app create guestbook --path guestbook --repo https://github.com/argoproj/argocd-example-apps.git --dest-server https://kubernetes.default.svc --dest-namespace default --server localhost:8080 --plaintext --insecure
|
||||
```
|
||||
|
||||
You can open the UI: [http://localhost:4000](http://localhost:4000)
|
||||
|
||||
As an alternative to using the above command line parameters each time you call `argocd` CLI, you can set the following environment variables:
|
||||
|
||||
```bash
|
||||
export ARGOCD_SERVER=127.0.0.1:8080
|
||||
export ARGOCD_OPTS="--plaintext --insecure"
|
||||
```
|
||||
|
||||
## Running Local Containers
|
||||
|
||||
You may need to run containers locally, so here's how:
|
||||
|
||||
Create a login to Docker Hub, then log in.
|
||||
|
||||
```bash
|
||||
docker login
|
||||
```
|
||||
|
||||
Add your username as the environment variable, e.g. to your `~/.bash_profile`:
|
||||
|
||||
```bash
|
||||
export IMAGE_NAMESPACE=alexcollinsintuit
|
||||
```
|
||||
|
||||
If you don't want to use `latest` as the image's tag (the default), you can set it from the environment too:
|
||||
|
||||
```bash
|
||||
export IMAGE_TAG=yourtag
|
||||
```
|
||||
|
||||
Build the image:
|
||||
|
||||
```bash
|
||||
DOCKER_PUSH=true make image
|
||||
```
|
||||
|
||||
Update the manifests (be sure to do that from a shell that has above environment variables set)
|
||||
|
||||
```bash
|
||||
make manifests
|
||||
```
|
||||
|
||||
Install the manifests:
|
||||
|
||||
```bash
|
||||
kubectl -n argocd apply --force -f manifests/install.yaml
|
||||
```
|
||||
|
||||
Scale your deployments up:
|
||||
|
||||
```bash
|
||||
kubectl -n argocd scale deployment/argocd-application-controller --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-dex-server --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-repo-server --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-server --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-redis --replicas 1
|
||||
```
|
||||
|
||||
Now you can set-up the port-forwarding and open the UI or CLI.
|
||||
Please refer to [the Contribution Guide](https://argoproj.github.io/argo-cd/developer-guide/contributing/)
|
||||
|
||||
BIN
docs/assets/azure-api-permissions.png
Normal file
BIN
docs/assets/azure-api-permissions.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 37 KiB |
BIN
docs/assets/azure-token-configuration.png
Normal file
BIN
docs/assets/azure-token-configuration.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 94 KiB |
175
docs/bug_triage.md
Normal file
175
docs/bug_triage.md
Normal file
@@ -0,0 +1,175 @@
|
||||
# Bug triage proposal for ArgoCD
|
||||
|
||||
## Situation
|
||||
|
||||
Lots of issues on our issue tracker. Many of them not bugs, but questions,
|
||||
or very environment related. It's easy to lose oversight.
|
||||
|
||||
Also, it's not obvious which bugs are important. Which bugs should be fixed
|
||||
first? Can we make a new release with the current inventory of open bugs?
|
||||
Is there still a bug that should make it to the new release?
|
||||
|
||||
## Proposal
|
||||
|
||||
We should agree upon a common issue triage process. The process must be lean
|
||||
and efficient, and should support us and the community looking into the GH
|
||||
issue tracker at making the following decisions:
|
||||
|
||||
* Is it even a real bug?
|
||||
* If it is a real bug, what is the current status of the bug (next to "open" or "closed")?
|
||||
* How important is it to fix the bug?
|
||||
* How urgent is it to fix the bug?
|
||||
* Who will be working to fix the bug?
|
||||
|
||||
We need new methods to classify our bugs, at least into these categories:
|
||||
|
||||
* validity: Does the issue indeed represent a true bug
|
||||
* severity: Denominates what impact the bug has
|
||||
* priority: Denominates the urgency of the fix
|
||||
|
||||
## Triage process
|
||||
|
||||
GH issue tracker provides us with the possibility to label issues. Using these
|
||||
labels is not perfect, but should give a good start. Each new issue created in
|
||||
our issue tracker should be correctly labeled during its lifecycle, so keeping
|
||||
an overview would be simplified by the ability to filter for labels.
|
||||
|
||||
The triage process could be as follows:
|
||||
|
||||
1. A new bug issue is created by someone on the tracker
|
||||
|
||||
1. The first person of the core team to see it will start the triage by classifying
|
||||
the issue (see below). This will indicate the creator that we have noticed the
|
||||
issue, and that it's not "fire & forget" tracker.
|
||||
|
||||
1. Initial classification should be possible even when much of the information is
|
||||
missing yet. In this case, the issue would be classified as such (see below).
|
||||
Again, this indicates that someone has noticed the issue, and there is activity
|
||||
in progress to get the required information.
|
||||
|
||||
1. Classification of the issue can change over its life-cycle. However, once the
|
||||
issue has been initially classified correctly (that is, with something else than
|
||||
the "placeholder" classification discussed above), changes to the classification
|
||||
should be discussed first with the person who initially classified the issue.
|
||||
|
||||
## Classification
|
||||
|
||||
We have introduced some new labels in the GH issue tracker for classifying the
|
||||
bug issues. These labels are prefixed with the string `bug/`, and should be
|
||||
applied to all new issues in our tracker.
|
||||
|
||||
### Classification requires more information
|
||||
|
||||
If it is not yet possible to classify the bug, i.e. because more information is
|
||||
required to correctly classify the bug, you should always set the label
|
||||
`bug/in-triage` to make it clear that triage process has started but could not
|
||||
yet be completed.
|
||||
|
||||
### Issue type
|
||||
|
||||
If it's clear that a bug issue is not a bug, but a question or reach for support,
|
||||
it should be marked as such:
|
||||
|
||||
* Remove any of the labels prefixed `bug/` that might be attached to the issue
|
||||
* Remove the label `bug` from the issue
|
||||
* Add the label `inquiry` to the issue
|
||||
|
||||
If the inquiry turns out to be something that should be covered by the docs, but
|
||||
is not, the following actions should be taken:
|
||||
|
||||
* The title of the issue should be adapted that it will be clear that the bug
|
||||
affects the docs, not the code
|
||||
* The label `documentation` should be attached to the issue
|
||||
|
||||
If the issue is too confusing (can happen), another possibility is to close the
|
||||
issue and create a new one as described in above (with a meaningful title and
|
||||
the label `documentation` attached to it).
|
||||
|
||||
### Validity
|
||||
|
||||
Some reported bugs may be invalid. It could be a user error, a misconfiguration
|
||||
or something along these lines. If it is clear that the bug falls into one of
|
||||
these categories:
|
||||
|
||||
* Remove any of the labels prefixed `bug/` that might be attached to the issue
|
||||
* Add the label `invalid` to the issue
|
||||
* Retain the `bug` label to the issue
|
||||
* Close the issue
|
||||
|
||||
When closing the issue, it is important to let the requester know why the issue
|
||||
has been closed. Ideally, provide a solution to their request
|
||||
in the comments of the issue, or at least pointers to possible solutions.
|
||||
|
||||
### Regressions
|
||||
|
||||
Sometimes it happens that something that worked in a previous release does
|
||||
not work now when it should still work. If this is the case, the following
|
||||
actions should be done
|
||||
|
||||
* Add the label `regression` to the issue
|
||||
* Continue with triage
|
||||
|
||||
### Severity
|
||||
|
||||
It is important to find out how severe the impact of a bug is, and to label
|
||||
the bug with this information. For this purpose, the following labels exist
|
||||
in our tracker:
|
||||
|
||||
* `bug/severity:minor`: Bug has limited impact and maybe affects only an
|
||||
edge-case. Core functionality is not affected, and there is no data loss
|
||||
involved. Something might not work as expected. Example of these kind of
|
||||
bugs could be a CLI command that is not working as expected, a glitch in
|
||||
the UI, wrong documentation, etc.
|
||||
|
||||
* `bug/severity:major`: Malfunction in one of the core components, impacting
|
||||
a majority of users or one of the core functionalities in ArgoCD. There is
|
||||
no data loss involved, but for example a sync is not working due to a bug
|
||||
in ArgoCD (and not due to user error), manifests fail to render, etc.
|
||||
|
||||
* `bug/severity:critical`: A critical bug in ArgoCD, possibly resulting in
|
||||
data loss, integrity breach or severe degraded overall functionality.
|
||||
|
||||
### Priority
|
||||
|
||||
The priority of an issue indicates how quickly the issue should be fixed and
|
||||
released. This information should help us in deciding the target release for
|
||||
the fix, and whether a bug would even justify a dedicated patch release. The
|
||||
following labels can be used to classify bugs into their priority:
|
||||
|
||||
* `bug/priority:low`: Will be fixed without any specific target release.
|
||||
|
||||
* `bug/priority:medium`: Should be fixed in the minor or major release, which
|
||||
ever comes first.
|
||||
|
||||
* `bug/priority:high`: Should be fixed with the next patch release.
|
||||
|
||||
* `bug/priority:urgent`: Should be fixed immediately and might even justify a
|
||||
dedicated patch release.
|
||||
|
||||
The priority should be set according to the value of the fix and the attached
|
||||
severity. This means a bug with a severity of `minor` could still be classified
|
||||
with priority `high`, when it is a *low hanging fruit* (i.e. the bug is easy to
|
||||
fix with low effort) and contributes to overall user experience of ArgoCD.
|
||||
|
||||
Likewise, a bug classified with a severity of `major` could still have a
|
||||
priority of `medium`, if there is a workaround available for the bug which
|
||||
mitigates the effects of the bug to a bearable extent.
|
||||
|
||||
Bugs classified with a severity of `critical` most likely belong to either
|
||||
the `urgent` priority, or to the `high` category when there is a workaround
|
||||
available.
|
||||
|
||||
Bugs that have a `regression` label attached (see Regressions above) should
|
||||
usually be handled with higher priority, so those kind of issues will most
|
||||
likely have a priority of `high` or `urgent` attached to it.
|
||||
|
||||
## Summary
|
||||
|
||||
Applying a little discipline when working with our issue tracker could greatly
|
||||
help us in making informed decision about which bugs to fix when. Also, it
|
||||
would help us to get a clear view whether we can do for example a new minor
|
||||
release without having forgotten any outstanding issues that should make it into
|
||||
that release.
|
||||
|
||||
If we are able to work with classification of bug issues, we might want to
|
||||
extend the triage for enhancement proposals and PRs as well.
|
||||
BIN
docs/developer-guide/ci-pipeline-failed.png
Normal file
BIN
docs/developer-guide/ci-pipeline-failed.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 111 KiB |
@@ -1,5 +1,9 @@
|
||||
# CI
|
||||
|
||||
!!!warning
|
||||
This documentation is out-of-date. Please bear with us while we work to
|
||||
update the documentation to reflect reality!
|
||||
|
||||
## Troubleshooting Builds
|
||||
|
||||
### "Check nothing has changed" step fails
|
||||
@@ -38,4 +42,12 @@ make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0
|
||||
|
||||
## Public CD
|
||||
|
||||
[https://cd.apps.argoproj.io/](https://cd.apps.argoproj.io/)
|
||||
Every commit to master is built and published to `docker.pkg.github.com/argoproj/argo-cd/argocd:<version>-<short-sha>`. The list of images is available at
|
||||
https://github.com/argoproj/argo-cd/packages.
|
||||
|
||||
!!! note
|
||||
Github docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read
|
||||
even publicly available packages. Follow the steps from Kubernetes [documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry)
|
||||
to configure image pull secret if you want to use `docker.pkg.github.com/argoproj/argo-cd/argocd` image.
|
||||
|
||||
The image is automatically deployed to the dev Argo CD instance: [https://cd.apps.argoproj.io/](https://cd.apps.argoproj.io/)
|
||||
|
||||
258
docs/developer-guide/contributing.md
Normal file
258
docs/developer-guide/contributing.md
Normal file
@@ -0,0 +1,258 @@
|
||||
# Contribution guide
|
||||
|
||||
## Preface
|
||||
|
||||
We want to make contributing to ArgoCD as simple and smooth as possible.
|
||||
|
||||
This guide shall help you in setting up your build & test environment, so that you can start developing and testing bug fixes and feature enhancements without having to make too much effort in setting up a local toolchain.
|
||||
|
||||
If you want to submit a PR, please read this document carefully, as it contains important information guiding you through our PR quality gates.
|
||||
|
||||
As is the case with the development process, this document is under constant change. If you notice any error, or if you think this document is out-of-date, or if you think it is missing something: Feel free to submit a PR or submit a bug to our GitHub issue tracker.
|
||||
|
||||
If you need guidance with submitting a PR, or have any other questions regarding development of ArgoCD, do not hesitate to [join our Slack](https://argoproj.github.io/community/join-slack) and get in touch with us in the `#argo-dev` channel!
|
||||
|
||||
## Before you start
|
||||
|
||||
You will need at least the following things in your toolchain in order to develop and test ArgoCD locally:
|
||||
|
||||
* A Kubernetes cluster. You won't need a fully blown multi-master, multi-node cluster, but you will need something like K3S, Minikube or microk8s. You will also need a working Kubernetes client (`kubectl`) configuration in your development environment. The configuration must reside in `~/.kube/config` and the API server URL must point to the IP address of your local machine (or VM), and **not** to `localhost` or `127.0.0.1` if you are using the virtualized development toolchain (see below)
|
||||
|
||||
* You will also need a working Docker runtime environment, to be able to build and run images.
|
||||
The Docker version must be fairly recent, and support multi-stage builds. You should not work as root. Make your local user a member of the `docker` group to be able to control the Docker service on your machine.
|
||||
|
||||
* Obviously, you will need a `git` client for pulling source code and pushing back your changes.
|
||||
|
||||
* Last but not least, you will need a Go SDK and related tools (such as GNU `make`) installed and working on your development environment.
|
||||
|
||||
* We will assume that your Go workspace is at `~/go`
|
||||
|
||||
!!! note
|
||||
**Attention minikube users**: By default, minikube will create Kubernetes client configuration that uses authentication data from files. This is incompatible with the virtualized toolchain. So if you intend to use the virtualized toolchain, you have to embed this authentication data into the client configuration. To do so, issue `minikube config set embed-certs true` and restart your minikube. Please also note that minikube using the Docker driver is currently not supported with the virtualized toolchain, because the Docker driver exposes the API server on 127.0.0.1 hard-coded. If in doubt, run `make verify-kube-connect` to find out.
|
||||
|
||||
## Submitting PRs
|
||||
|
||||
When you submit a PR against ArgoCD's GitHub repository, a couple of CI checks will be run automatically to ensure your changes will build fine and meet certain quality standards. Your contribution needs to pass those checks in order to be merged into the repository.
|
||||
|
||||
In general, it might be beneficial to only submit a PR for an existing issue. Especially for larger changes, an Enhancement Proposal should exist before.
|
||||
|
||||
!!!note
|
||||
|
||||
Please make sure that you always create PRs from a branch that is up-to-date with the latest changes from ArgoCD's master branch. Depending on how long it takes for the maintainers to review and merge your PR, it might be necessary to pull in latest changes into your branch again.
|
||||
|
||||
Please understand that we, as an Open Source project, have limited capacities for reviewing and merging PRs to ArgoCD. We will do our best to review your PR and give you feedback as soon as possible, but please bear with us if it takes a little longer than expected.
|
||||
|
||||
The following read will help you to submit a PR that meets the standards of our CI tests:
|
||||
|
||||
### Title of the PR
|
||||
|
||||
Please use a meaningful and concise title for your PR. This will help us to pick PRs for review quickly, and the PR title will also end up in the Changelog.
|
||||
|
||||
We use the [Semantic PR title checker](https://github.com/zeke/semantic-pull-requests) to categorize your PR into one of the following categories:
|
||||
|
||||
* `fix` - Your PR contains one or more code bug fixes
|
||||
* `feat` - Your PR contains a new feature
|
||||
* `docs` - Your PR improves the documentation
|
||||
* `chore` - Your PR improves any internals of ArgoCD, such as the build process, unit tests, etc
|
||||
|
||||
Please prefix the title of your PR with one of the valid categories. For example, if you chose `Add documentation for GitHub SSO integration` as the title of your PR, please use `docs: Add documentation for GitHub SSO integration` instead.
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
Every contributor to ArgoCD must have signed the current Contributor License Agreement (CLA). You only have to sign the CLA when you are a first time contributor, or when the agreement has changed since your last time signing it. The main purpose of the CLA is to ensure that you hold the required rights for your contribution. The CLA signing is an automated process.
|
||||
|
||||
You can read the current version of the CLA [here](https://cla-assistant.io/argoproj/argo-cd).
|
||||
|
||||
### PR template checklist
|
||||
|
||||
Upon opening a PR, the details will contain a checklist from a template. Please read the checklist, and tick those marks that apply to you.
|
||||
|
||||
### Automated builds & tests
|
||||
|
||||
After you have submitted your PR, and whenever you push new commits to that branch, GitHub will run a number of Continuous Integration checks against your code. It will execute the following actions, and each of them has to pass:
|
||||
|
||||
* Build the Go code (`make build`)
|
||||
* Generate API glue code and manifests (`make codegen`)
|
||||
* Run a Go linter on the code (`make lint`)
|
||||
* Run the unit tests (`make test`)
|
||||
* Run the End-to-End tests (`make test-e2e`)
|
||||
* Build and lint the UI code (`make ui`)
|
||||
* Build the `argocd` CLI (`make cli`)
|
||||
|
||||
If any of these tests in the CI pipeline fail, it means that some of your contribution is considered faulty (or a test might be flaky, see below).
|
||||
|
||||
### Code test coverage
|
||||
|
||||
We use [CodeCov](https://codecov.io) in our CI pipeline to check for test coverage, and once you submit your PR, it will run and report on the coverage difference as a comment within your PR. If the difference is too high in the negative, i.e. your submission introduced a significant drop in code coverage, the CI check will fail.
|
||||
|
||||
Whenever you develop a new feature or submit a bug fix, please also write appropriate unit tests for it. If you write a completely new module, please aim for at least 80% of coverage.
|
||||
If you want to see how much coverage just a specific module (i.e. your new one) has, you can set the `TEST_MODULE` to the (fully qualified) name of that module with `make test`, i.e.
|
||||
|
||||
```bash
|
||||
make test TEST_MODULE=github.com/argoproj/argo-cd/server/cache
|
||||
...
|
||||
ok github.com/argoproj/argo-cd/server/cache 0.029s coverage: 89.3% of statements
|
||||
```
|
||||
|
||||
## Local vs Virtualized toolchain
|
||||
|
||||
ArgoCD provides a fully virtualized development and testing toolchain using Docker images. It is recommended to use those images, as they provide the same runtime environment as the final product and it is much easier to keep up-to-date with changes to the toolchain and dependencies. But as using Docker comes with a slight performance penalty, you might want to set up a local toolchain.
|
||||
|
||||
Most relevant targets for the build & test cycles in the `Makefile` provide two variants, one of them suffixed with `-local`. For example, `make test` will run unit tests in the Docker container, `make test-local` will run it natively on your local system.
|
||||
|
||||
If you are going to use the virtualized toolchain, please bear in mind the following things:
|
||||
|
||||
* Your Kubernetes API server must listen on the interface of your local machine or VM, and not on `127.0.0.1` only.
|
||||
* Your Kubernetes client configuration (`~/.kube/config`) must not use an API URL that points to `localhost` or `127.0.0.1`.
|
||||
|
||||
You can test whether the virtualized toolchain has access to your Kubernetes cluster by running `make verify-kube-connect` (*after* you have setup your development environment, as described below), which will run `kubectl version` inside the Docker container used for running all tests.
|
||||
|
||||
The Docker container for the virtualized toolchain will use the following local mounts from your workstation, and possibly modify its contents:
|
||||
|
||||
* `~/go/src` - Your Go workspace's source directory (modifications expected)
|
||||
* `~/.cache/go-build` - Your Go build cache (modifications expected)
|
||||
* `~/.kube` - Your Kubernetes client configuration (no modifications)
|
||||
* `/tmp` - Your system's temp directory (modifications expected)
|
||||
|
||||
## Setting up your development environment
|
||||
|
||||
The following steps are required no matter whether you chose to use a virtualized or a local toolchain.
|
||||
|
||||
### Clone the ArgoCD repository from your personal fork on GitHub
|
||||
|
||||
* `mkdir -p ~/go/src/github.com/argoproj`
|
||||
* `cd ~/go/src/github.com/argoproj`
|
||||
* `git clone https://github.com/yourghuser/argo-cd`
|
||||
* `cd argo-cd`
|
||||
|
||||
### Optional: Setup an additional Git remote
|
||||
|
||||
While everyone has their own Git workflow, the author of this document recommends to create a remote called `upstream` in your local copy pointing to the original ArgoCD repository. This way, you can easily keep your local branches up-to-date by merging in latest changes from the ArgoCD repository, i.e. by doing a `git pull upstream master` in your locally checked out branch. To create the remote, run `git remote add upstream https://github.com/argoproj/argo-cd`
|
||||
|
||||
### Install the must-have requirements
|
||||
|
||||
Make sure you fulfill the pre-requisites above and run some preliminary tests. None of them should report an error.
|
||||
|
||||
* Run `kubectl version`
|
||||
* Run `docker version`
|
||||
* Run `go version`
|
||||
|
||||
### Build (or pull) the required Docker image
|
||||
|
||||
Build the required Docker image by running `make test-tools-image` or pull the latest version by issuing `docker pull argoproj/argocd-test-tools`.
|
||||
|
||||
The `Dockerfile` used to build these images can be found at `test/container/Dockerfile`.
|
||||
|
||||
### Test connection from build container to your K8s cluster
|
||||
|
||||
Run `make verify-kube-connect`, it should execute without error.
|
||||
|
||||
If you receive an error similar to the following:
|
||||
|
||||
```
|
||||
The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port?
|
||||
make: *** [Makefile:386: verify-kube-connect] Error 1
|
||||
```
|
||||
|
||||
you should edit your `~/.kube/config` and modify the `server` option to point to your correct K8s API (as described above).
|
||||
|
||||
## The development cycle
|
||||
|
||||
When you have developed and possibly manually tested the code you want to contribute, you should ensure that everything will build correctly. Commit your changes to the local copy of your Git branch and perform the following steps:
|
||||
|
||||
### Pull in all build dependencies
|
||||
|
||||
As build dependencies change over time, you have to synchronize your development environment with the current specification. In order to pull in all required dependencies, issue:
|
||||
|
||||
* `make dep`
|
||||
* `make dep-ensure`
|
||||
* `make dep-ui`
|
||||
|
||||
### Generate API glue code and other assets
|
||||
|
||||
ArgoCD relies on Google's [Protocol Buffers](https://developers.google.com/protocol-buffers) for its API, and this makes heavy use of auto-generated glue code and stubs. Whenever you touched parts of the API code, you must re-generate the auto generated code.
|
||||
|
||||
* Run `make codegen`, this might take a while
|
||||
* Check if something has changed by running `git status` or `git diff`
|
||||
* Commit any possible changes to your local Git branch, an appropriate commit message would be `Changes from codegen`, for example.
|
||||
|
||||
!!!note
|
||||
There are a few non-obvious assets that are auto-generated. You should not change the autogenerated assets, as they will be overwritten by a subsequent run of `make codegen`. Instead, change their source files. Prominent examples of non-obvious auto-generated code are `swagger.json` or the installation manifest YAMLs.
|
||||
|
||||
### Build your code and run unit tests
|
||||
|
||||
After the code glue has been generated, your code should build and the unit tests should run without any errors. Execute the following statements:
|
||||
|
||||
* `make build`
|
||||
* `make test`
|
||||
|
||||
These steps are non-modifying, so there's no need to check for changes afterwards.
|
||||
|
||||
### Lint your code base
|
||||
|
||||
In order to keep a consistent code style in our source tree, your code must be well-formed in accordance to some widely accepted rules, which are applied by a Linter.
|
||||
|
||||
The Linter might make some automatic changes to your code, such as indentation fixes. Some other errors reported by the Linter have to be fixed manually.
|
||||
|
||||
* Run `make lint` and observe any errors reported by the Linter
|
||||
* Fix any of the errors reported and commit to your local branch
|
||||
* Finally, after the Linter reports no errors anymore, run `git status` or `git diff` to check for any changes made automatically by Lint
|
||||
* If there were automatic changes, commit them to your local branch
|
||||
|
||||
If you touched UI code, you should also run the Yarn linter on it:
|
||||
|
||||
* Run `make lint-ui`
|
||||
* Fix any of the errors reported by it
|
||||
|
||||
## Setting up a local toolchain
|
||||
|
||||
For development, you can either use the fully virtualized toolchain provided as Docker images, or you can set up the toolchain on your local development machine. Due to the dynamic nature of requirements, you might want to stay with the virtualized environment.
|
||||
|
||||
### Install required dependencies and build-tools
|
||||
|
||||
!!!note
|
||||
The installations instructions are valid for Linux hosts only. Mac instructions will follow shortly.
|
||||
|
||||
For installing the tools required to build and test ArgoCD on your local system, we provide convenient installer scripts. By default, they will install binaries to `/usr/local/bin` on your system, which might require `root` privileges.
|
||||
|
||||
You can change the target location by setting the `BIN` environment before running the installer scripts. For example, you can install the binaries into `~/go/bin` (which should then be the first component in your `PATH` environment, i.e. `export PATH=~/go/bin:$PATH`):
|
||||
|
||||
```shell
|
||||
make BIN=~/go/bin install-tools-local
|
||||
```
|
||||
|
||||
Additionally, you have to install at least the following tools via your OS's package manager (this list might not be always up-to-date):
|
||||
|
||||
* Git LFS plugin
|
||||
* GnuPG version 2
|
||||
|
||||
### Install Go dependencies
|
||||
|
||||
You need to pull in all required Go dependencies. To do so, run
|
||||
|
||||
* `make dep-ensure-local`
|
||||
* `make dep-local`
|
||||
|
||||
### Test your build toolchain
|
||||
|
||||
The first thing you can do to check whether your build toolchain is set up correctly is to generate the glue code for the API and, after that, run a normal build:
|
||||
|
||||
* `make codegen-local`
|
||||
* `make build-local`
|
||||
|
||||
This should return without any error.
|
||||
|
||||
### Run unit-tests
|
||||
|
||||
The next thing is to make sure that unit tests are running correctly on your system. These will require that all dependencies, such as Helm, Kustomize, Git, GnuPG, etc are correctly installed and fully functioning:
|
||||
|
||||
* `make test-local`
|
||||
|
||||
### Run end-to-end tests
|
||||
|
||||
The final step is running the End-to-End testsuite, which makes sure that your Kubernetes dependencies are working properly. This will involve starting all of the ArgoCD components locally on your computer. The end-to-end tests consist of two parts: a server component, and a client component.
|
||||
|
||||
* First, start the End-to-End server: `make start-e2e-local`. This will spawn a number of processes and services on your system.
|
||||
* When all components have started, run `make test-e2e-local` to run the end-to-end tests against your local services.
|
||||
|
||||
For more information about End-to-End tests, refer to the [End-to-End test documentation](test-e2e.md).
|
||||
65
docs/developer-guide/faq.md
Normal file
65
docs/developer-guide/faq.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Contribution FAQ
|
||||
|
||||
## General
|
||||
|
||||
### Can I discuss my contribution ideas somewhere?
|
||||
|
||||
Sure thing! You can either open an Enhancement Proposal in our GitHub issue tracker or you can [join us on Slack](https://argoproj.github.io/community/join-slack) in channel #argo-dev to discuss your ideas and get guidance for submitting a PR.
|
||||
|
||||
### No one has looked at my PR yet. Why?
|
||||
|
||||
As we have limited manpower, it can sometimes take a while for someone to respond to your PR. Especially when your PR contains complex or non-obvious changes. Please bear with us, we try to look at every PR that we receive.
|
||||
|
||||
### Why has my PR been declined? I put much work in it!
|
||||
|
||||
We appreciate that you have put your valuable time and know-how into a contribution. Alas, some changes do not fit into the overall ArgoCD philosophy, and therefore can't be merged into the official ArgoCD source tree.
|
||||
|
||||
To be on the safe side, make sure that you have created an Enhancement Proposal for your change before starting to work on your PR and have gathered enough feedback from the community and the maintainers.
|
||||
|
||||
## Failing CI checks
|
||||
|
||||
### One of the CI checks failed. Why?
|
||||
|
||||
You can click on the "Details" link next to the failed step to get more details about the failure. This will take you to the CircleCI website.
|
||||
|
||||

|
||||
|
||||
### Can I retrigger the checks without pushing a new commit?
|
||||
|
||||
Since the CI pipeline is triggered on Git commits, there is currently no (known) way on how to retrigger the CI checks without pushing a new commit to your branch.
|
||||
|
||||
If you are absolutely sure that the failure was due to a failure in the pipeline, and not an error within the changes you committed, you can push an empty commit to your branch, thus retriggering the pipeline without any code changes. To do so, issue
|
||||
|
||||
```bash
|
||||
git commit --allow-empty -m "Retrigger CI pipeline"
|
||||
git push origin <yourbranch>
|
||||
```
|
||||
|
||||
### Why does the build step fail?
|
||||
|
||||
Chances are that it fails for one of the following reasons in the CI while running fine on your machine:
|
||||
|
||||
* Sometimes, CircleCI kills the build step due to excessive memory usage. This happens rarely, but it has happened in the past. If you see a message like "killed" in the log output of CircleCI, you should retrigger the pipeline as described above. If the issue persists, please let us know.
|
||||
|
||||
* If the build is failing at the `Ensuring Gopkg.lock is up-to-date` step, you need to update the dependencies before you push your commits. Run `make dep-ensure` and `make dep` and commit the changes to `Gopkg.lock` to your branch.
|
||||
|
||||
### Why does the codegen step fail?
|
||||
|
||||
If the codegen step fails with "Check nothing has changed...", chances are high that you did not run `make codegen`, or did not commit the changes it made. You should double check by running `make codegen` followed by `git status` in the local working copy of your branch. Commit any changes and push them to your GH branch to have the CI check it again.
|
||||
|
||||
A second common case for this is, when you modified any of the auto generated assets, as these will be overwritten upon `make codegen`.
|
||||
|
||||
Generally, this step runs `codegen` and compares the outcome against the Git branch it has checked out. If there are differences, the step will fail.
|
||||
|
||||
### Why does the lint step fail?
|
||||
|
||||
The lint step is most likely to fail for two reasons:
|
||||
|
||||
* The `golangci-lint` process was OOM killed by CircleCI. This happens sometimes, and is annoying. This is indicated by a `Killed.` message in the CircleCI output.
|
||||
If this is the case, please re-trigger the CI process as described above and see if it runs through.
|
||||
|
||||
* Your code failed to lint correctly, or modifications were performed by the `golangci-lint` process. You should run `make lint` on your local branch and fix all the issues.
|
||||
|
||||
### Why does the test or e2e steps fail?
|
||||
|
||||
You should check for the cause of the failure on the CircleCI web site, as described above. This will give you the name of the test that has failed, and details about why. If your test are passing locally (using the virtualized toolchain), chances are that the test might be flaky and will pass the next time it is run. Please retrigger the CI pipeline as described above and see if the test step now passes.
|
||||
108
docs/developer-guide/running-locally.md
Normal file
108
docs/developer-guide/running-locally.md
Normal file
@@ -0,0 +1,108 @@
|
||||
# Running ArgoCD locally
|
||||
|
||||
## Run ArgoCD outside of Kubernetes
|
||||
|
||||
During development, it might be viable to run ArgoCD outside of a Kubernetes cluster. This will greatly speed up development, as you don't have to constantly build, push and install new ArgoCD Docker images with your latest changes.
|
||||
|
||||
You will still need a working Kubernetes cluster, as described in the [Contribution Guide](contributing.md), where ArgoCD will store all of its resources.
|
||||
|
||||
If you followed the [Contribution Guide](contributing.md) in setting up your toolchain, you can run ArgoCD locally with these simple steps:
|
||||
|
||||
### Scale down any ArgoCD instance in your cluster
|
||||
|
||||
First make sure that ArgoCD is not running in your development cluster by scaling down the deployments:
|
||||
|
||||
```shell
|
||||
kubectl -n argocd scale deployment/argocd-application-controller --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-dex-server --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-repo-server --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-server --replicas 0
|
||||
kubectl -n argocd scale deployment/argocd-redis --replicas 0
|
||||
```
|
||||
|
||||
### Start local services
|
||||
|
||||
When you use the virtualized toolchain, starting local services is as simple as running
|
||||
|
||||
```bash
|
||||
make start
|
||||
```
|
||||
|
||||
This will start all ArgoCD services and the UI in a Docker container and expose the following ports to your host:
|
||||
|
||||
* The ArgoCD API server on port 8080
|
||||
* The ArgoCD UI server on port 4000
|
||||
|
||||
You can now use either the web UI by pointing your browser to `http://localhost:4000` or use the CLI against the API at `http://localhost:8080`. Be sure to use the `--insecure` and `--plaintext` options to the CLI.
|
||||
|
||||
As an alternative to using the above command line parameters each time you call `argocd` CLI, you can set the following environment variables:
|
||||
|
||||
```bash
|
||||
export ARGOCD_SERVER=127.0.0.1:8080
|
||||
export ARGOCD_OPTS="--plaintext --insecure"
|
||||
```
|
||||
|
||||
### Scale up ArgoCD in your cluster
|
||||
|
||||
Once you have finished testing your changes locally and want to bring back ArgoCD in your development cluster, simply scale the deployments up again:
|
||||
|
||||
```bash
|
||||
kubectl -n argocd scale deployment/argocd-application-controller --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-dex-server --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-repo-server --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-server --replicas 1
|
||||
kubectl -n argocd scale deployment/argocd-redis --replicas 1
|
||||
```
|
||||
|
||||
## Run your own ArgoCD images on your cluster
|
||||
|
||||
For your final tests, it might be necessary to build your own images and run them in your development cluster.
|
||||
|
||||
### Create Docker account and login
|
||||
|
||||
You might need to create an account on [Docker Hub](https://hub.docker.com) if you don't have one already. Once you have created your account, log in from your development environment:
|
||||
|
||||
```bash
|
||||
docker login
|
||||
```
|
||||
|
||||
### Create and push Docker images
|
||||
|
||||
You will need to push the built images to your own Docker namespace:
|
||||
|
||||
```bash
|
||||
export IMAGE_NAMESPACE=youraccount
|
||||
```
|
||||
|
||||
If you don't set `IMAGE_TAG` in your environment, the default of `:latest` will be used. To change the tag, export the variable in the environment:
|
||||
|
||||
```bash
|
||||
export IMAGE_TAG=1.5.0-myrc
|
||||
```
|
||||
|
||||
Then you can build & push the image in one step:
|
||||
|
||||
```bash
|
||||
DOCKER_PUSH=true make image
|
||||
```
|
||||
|
||||
### Configure manifests for your image
|
||||
|
||||
With `IMAGE_NAMESPACE` and `IMAGE_TAG` still set, run
|
||||
|
||||
```bash
|
||||
make manifests
|
||||
```
|
||||
|
||||
to build a new set of installation manifests which include your specific image reference.
|
||||
|
||||
!!!note
|
||||
Do not commit these manifests to your repository. If you want to revert the changes, the easiest way is to unset `IMAGE_NAMESPACE` and `IMAGE_TAG` from your environment and run `make manifests` again. This will re-create the default manifests.
|
||||
|
||||
### Configure your cluster with custom manifests
|
||||
|
||||
The final step is to push the manifests to your cluster, so it will pull and run your image:
|
||||
|
||||
```bash
|
||||
kubectl -n argocd apply --force -f manifests/install.yaml
|
||||
```
|
||||
@@ -1,5 +1,9 @@
|
||||
# E2E Tests
|
||||
|
||||
!!!warning
|
||||
This documentation is out-of-date. Please bear with us while we work to
|
||||
update the documentation to reflect reality!
|
||||
|
||||
The directory contains E2E tests and test applications. The test assume that Argo CD services are installed into `argocd-e2e` namespace or cluster in current context. One throw-away
|
||||
namespace `argocd-e2e***` is created prior to tests execute. The throw-away namespace is used as a target namespace for test applications.
|
||||
|
||||
|
||||
15
docs/faq.md
15
docs/faq.md
@@ -47,6 +47,10 @@ kubectl -n argocd patch secret argocd-secret \
|
||||
|
||||
Another option is to delete both the `admin.password` and `admin.passwordMtime` keys and restart argocd-server. This will set the password back to the pod name as per [the getting started guide](getting_started.md).
|
||||
|
||||
## How to disable admin user?
|
||||
|
||||
Add `admin.enabled: "false"` to the `argocd-cm` ConfigMap (see [user management](operator-manual/user-management/index.md)).
|
||||
|
||||
## Argo CD cannot deploy Helm Chart based applications without internet access, how can I solve it?
|
||||
|
||||
Argo CD might fail to generate Helm chart manifests if the chart has dependencies located in external repositories. To solve the problem you need to make sure that `requirements.yaml`
|
||||
@@ -111,11 +115,9 @@ E.g.
|
||||
* `'3072Mi'` normalized to `'3Gi'`
|
||||
* `3072` normalized to `'3072'` (quotes added)
|
||||
|
||||
To fix this - replace your values with the normalized values.
|
||||
To fix this use diffing customizations [settings](./user-guide/diffing.md#known-kubernetes-types-in-crds-resource-limits-volume-mounts-etc).
|
||||
|
||||
See [#1615](https://github.com/argoproj/argo-cd/issues/1615)
|
||||
|
||||
# How Do I Fix "invalid cookie, longer than max length 4093"?
|
||||
## How Do I Fix "invalid cookie, longer than max length 4093"?
|
||||
|
||||
Argo CD uses a JWT as the auth token. You likely are part of many groups and have gone over the 4KB limit which is set for cookies.
|
||||
You can get the list of groups by opening "developer tools -> network"
|
||||
@@ -146,3 +148,8 @@ argocd ... --insecure
|
||||
```
|
||||
|
||||
!!! warning "Do not use `--insecure` in production"
|
||||
|
||||
## I have configured Dex via `dex.config` in `argocd-cm`, it still says Dex is unconfigured. Why?
|
||||
|
||||
Most likely you forgot to set the `url` in `argocd-cm` to point to your ArgoCD as well. See also
|
||||
[the docs](/operator-manual/user-management/#2-configure-argo-cd-for-sso)
|
||||
|
||||
@@ -24,8 +24,9 @@ kubectl create namespace argocd
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
|
||||
```
|
||||
|
||||
Follow our [getting started guide](getting_started.md). Further [documentation](docs/)
|
||||
is provided for additional features.
|
||||
Follow our [getting started guide](getting_started.md). Further user oriented [documentation](user_guide/)
|
||||
is provided for additional features. If you are looking to upgrade ArgoCD, see the [upgrade guide](./operator-manual/upgrading/overview.md).
|
||||
Developer oriented [documentation](developer-guide/) is available for people interested in building third-party integrations.
|
||||
|
||||
## How it works
|
||||
|
||||
|
||||
@@ -27,9 +27,11 @@ spec:
|
||||
# Release name override (defaults to application name)
|
||||
releaseName: guestbook
|
||||
|
||||
# Helm values files for overriding values in the helm chart
|
||||
# The path is relative to the spec.source.path directory defined above
|
||||
valueFiles:
|
||||
- values-prod.yaml
|
||||
|
||||
|
||||
# Values file as block file
|
||||
values: |
|
||||
ingress:
|
||||
@@ -48,6 +50,8 @@ spec:
|
||||
|
||||
# kustomize specific config
|
||||
kustomize:
|
||||
# Optional kustomize version. Note: version must be configured in argocd-cm ConfigMap
|
||||
version: v3.5.4
|
||||
# Optional image name prefix
|
||||
namePrefix: prod-
|
||||
# Optional images passed to "kustomize edit set image".
|
||||
@@ -90,6 +94,8 @@ spec:
|
||||
automated:
|
||||
prune: true # Specifies if resources should be pruned during auto-syncing ( false by default ).
|
||||
selfHeal: true # Specifies if partial app sync should be executed when resources are changed only in target Kubernetes cluster and no git change detected ( false by default ).
|
||||
syncOptions: # Sync options which modifies sync behavior
|
||||
    - Validate=false # disables resource validation (equivalent to 'kubectl apply --validate=false')
|
||||
|
||||
# Ignore differences at the specified json pointers
|
||||
ignoreDifferences:
|
||||
|
||||
@@ -100,7 +100,7 @@ data:
|
||||
# List of json pointers in the object to ignore differences
|
||||
ignoreDifferences: |
|
||||
jsonPointers:
|
||||
- webhooks/0/clientConfig/caBundle
|
||||
- /webhooks/0/clientConfig/caBundle
|
||||
certmanager.k8s.io/Certificate:
|
||||
# Lua script for customizing the health status assessment
|
||||
health.lua: |
|
||||
@@ -183,6 +183,16 @@ data:
|
||||
clusters:
|
||||
- "*.local"
|
||||
|
||||
# By default all resource group/kinds are included. The resource.inclusions setting allows customizing
|
||||
# list of included group/kinds.
|
||||
resource.inclusions: |
|
||||
- apiGroups:
|
||||
- repositories.stash.appscode.com
|
||||
kinds:
|
||||
- Snapshot
|
||||
clusters:
|
||||
- "*.local"
|
||||
|
||||
# Configuration to add a config management plugin.
|
||||
configManagementPlugins: |
|
||||
- name: kasane
|
||||
@@ -194,7 +204,20 @@ data:
|
||||
# Build options/parameters to use with `kustomize build` (optional)
|
||||
kustomize.buildOptions: --load_restrictor none
|
||||
|
||||
# Additional Kustomize versions and corresponding binary paths
|
||||
kustomize.version.v3.5.1: /custom-tools/kustomize_3_5_1
|
||||
kustomize.version.v3.5.4: /custom-tools/kustomize_3_5_4
|
||||
|
||||
# The metadata.label key name where Argo CD injects the app name as a tracking label (optional).
|
||||
# Tracking labels are used to determine which resources need to be deleted when pruning.
|
||||
# If omitted, Argo CD injects the app name into the label: 'app.kubernetes.io/instance'
|
||||
application.instanceLabelKey: mycompany.com/appname
|
||||
|
||||
# disables admin user. Admin is enabled by default
|
||||
admin.enabled: "false"
|
||||
# add an additional local user with apiKey and login capabilities
|
||||
# apiKey - allows generating API keys
|
||||
# login - allows to login using UI
|
||||
accounts.alice: apiKey, login
|
||||
# disables user. User is enabled by default
|
||||
accounts.alice.enabled: "false"
|
||||
@@ -34,3 +34,10 @@ data:
|
||||
webhook.bitbucketserver.secret: shhhh! it's a bitbucket server secret
|
||||
# gogs server webhook secret
|
||||
webhook.gogs.secret: shhhh! it's a gogs server secret
|
||||
|
||||
# an additional user password and its last modified time (see user definition in argocd-cm.yaml)
|
||||
accounts.alice.password:
|
||||
accounts.alice.passwordMtime:
|
||||
# list of generated account tokens/api keys
|
||||
accounts.alice.tokens: |
|
||||
[{"id":"123","iat":1583789194,"exp":1583789194}]
|
||||
@@ -14,7 +14,7 @@ can be customized to use alternative toolchain required by your environment.
|
||||
|
||||
## Adding Tools Via Volume Mounts
|
||||
|
||||
The first technique is to use an `init` container and a `volumeMount` to copy a different verison of
|
||||
The first technique is to use an `init` container and a `volumeMount` to copy a different version of
|
||||
a tool into the repo-server container. In the following example, an init container is overwriting
|
||||
the helm binary with a different version than what is bundled in Argo CD:
|
||||
|
||||
|
||||
@@ -100,6 +100,12 @@ spec:
|
||||
kind: LimitRange
|
||||
- group: ''
|
||||
kind: NetworkPolicy
|
||||
# Deny all namespaced-scoped resources from being created, except for Deployment and StatefulSet
|
||||
    namespaceResourceWhitelist:
|
||||
- group: 'apps'
|
||||
kind: Deployment
|
||||
- group: 'apps'
|
||||
kind: StatefulSet
|
||||
roles:
|
||||
# A role which provides read-only access to all applications in the project
|
||||
- name: read-only
|
||||
@@ -122,6 +128,12 @@ spec:
|
||||
|
||||
## Repositories
|
||||
|
||||
!!!note
|
||||
Some Git hosters - notably GitLab and possibly on-premise GitLab instances as well - require you to
|
||||
specify the `.git` suffix in the repository URL, otherwise they will send a HTTP 301 redirect to the
|
||||
repository URL suffixed with `.git`. ArgoCD will **not** follow these redirects, so you have to
|
||||
adapt your repository URL to be suffixed with `.git`.
|
||||
|
||||
Repository credentials are stored in secret. Use following steps to configure a repo:
|
||||
|
||||
1. Create secret which contains repository credentials. Consider using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) to store encrypted secret
|
||||
@@ -506,7 +518,7 @@ data:
|
||||
key: key
|
||||
```
|
||||
|
||||
## Resource Exclusion
|
||||
## Resource Exclusion/Inclusion
|
||||
|
||||
Resources can be excluded from discovery and sync so that ArgoCD is unaware of them. For example, `events.k8s.io` and `metrics.k8s.io` are always excluded. Use cases:
|
||||
|
||||
@@ -543,6 +555,25 @@ The `resource.exclusions` node is a list of objects. Each object can have:
|
||||
|
||||
If all three match, then the resource is ignored.
|
||||
|
||||
In addition to exclusions, you might configure the list of included resources using the `resource.inclusions` setting.
|
||||
By default, all resource group/kinds are included. The `resource.inclusions` setting allows customizing the list of included group/kinds:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
data:
|
||||
resource.inclusions: |
|
||||
- apiGroups:
|
||||
- "*"
|
||||
kinds:
|
||||
- Deployment
|
||||
clusters:
|
||||
- https://192.168.0.20
|
||||
kind: ConfigMap
|
||||
```
|
||||
|
||||
The `resource.inclusions` and `resource.exclusions` might be used together. The final list of resources includes group/kinds specified in `resource.inclusions` minus group/kinds
|
||||
specified in `resource.exclusions` setting.
|
||||
|
||||
Notes:
|
||||
|
||||
* Quote globs in your YAML to avoid parsing errors.
|
||||
@@ -551,7 +582,7 @@ Notes:
|
||||
|
||||
## SSO & RBAC
|
||||
|
||||
* SSO configuration details: [SSO](./sso/index.md)
|
||||
* SSO configuration details: [SSO](./user-management/index.md)
|
||||
* RBAC configuration details: [RBAC](./rbac.md)
|
||||
|
||||
## Manage Argo CD Using Argo CD
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Disaster Recovery
|
||||
|
||||
You can use `argocd-util` can be used to import and export all Argo CD data.
|
||||
You can use `argocd-util` to import and export all Argo CD data.
|
||||
|
||||
Make sure you have `~/.kube/config` pointing to your Argo CD cluster.
|
||||
|
||||
|
||||
@@ -158,6 +158,8 @@ Traefik can be used as an edge router and provide [TLS](https://docs.traefik.io/
|
||||
|
||||
It currently has an advantage over NGINX in that it can terminate both TCP and HTTP connections _on the same port_ meaning you do not require multiple ingress objects and hosts.
|
||||
|
||||
The API server should be run with TLS disabled. Edit the `argocd-server` deployment to add the `--insecure` flag to the argocd-server command.
|
||||
|
||||
```yaml
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
@@ -196,6 +198,59 @@ ArgoCD endpoints may be protected by one or more reverse proxies layers, in that
|
||||
$ argocd login <host>:<port> --header 'x-token1:foo' --header 'x-token2:bar' # can be repeated multiple times
|
||||
$ argocd login <host>:<port> --header 'x-token1:foo,x-token2:bar' # headers can also be comma separated
|
||||
```
|
||||
## ArgoCD Server and UI Root Path (v1.5.3)
|
||||
|
||||
ArgoCD server and UI can be configured to be available under a non-root path (e.g. `/argo-cd`).
|
||||
To do this, add the `--rootpath` flag into the `argocd-server` deployment command:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
name: argocd-server
|
||||
containers:
|
||||
- command:
|
||||
- /argocd-server
|
||||
- --staticassets
|
||||
- /shared/app
|
||||
- --repo-server
|
||||
- argocd-repo-server:8081
|
||||
- --rootpath
|
||||
- /argo-cd
|
||||
```
|
||||
NOTE: The flag `--rootpath` changes both API Server and UI base URL.
|
||||
Example nginx.conf:
|
||||
|
||||
```
|
||||
worker_processes 1;
|
||||
|
||||
events { worker_connections 1024; }
|
||||
|
||||
http {
|
||||
|
||||
sendfile on;
|
||||
|
||||
server {
|
||||
listen 443;
|
||||
|
||||
location /argo-cd/ {
|
||||
proxy_pass https://localhost:8080/argo-cd/;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $server_name;
|
||||
# buffering should be disabled for api/v1/stream/applications to support chunked response
|
||||
proxy_buffering off;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
Flag ```--grpc-web-root-path ``` is used to provide a non-root path (e.g. /argo-cd)
|
||||
|
||||
```shell
|
||||
$ argocd login <host>:<port> --grpc-web-root-path /argo-cd
|
||||
```
|
||||
|
||||
## UI Base Path
|
||||
|
||||
@@ -241,6 +296,8 @@ http {
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $server_name;
|
||||
# buffering should be disabled for api/v1/stream/applications to support chunked response
|
||||
proxy_buffering off;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,6 +65,7 @@ spec:
|
||||
|
||||
## Dashboards
|
||||
|
||||
You can find an example Grafana dashboard [here](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard.json)
|
||||
You can find an example Grafana dashboard [here](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard.json) or check demo instance
|
||||
[dashboard](https://grafana.apps.argoproj.io).
|
||||
|
||||

|
||||
|
||||
14
docs/operator-manual/notifications.md
Normal file
14
docs/operator-manual/notifications.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Notifications
|
||||
|
||||
Notifications support is not bundled into Argo CD itself. Instead of reinventing the wheel and implementing an opinionated notification system, Argo CD leverages integrations
|
||||
with third-party notification systems. The following integrations are recommended:
|
||||
|
||||
* To monitor Argo CD performance or health state of managed applications use [Prometheus Metrics](./metrics.md) in combination with [Grafana](https://grafana.com/),
|
||||
[Alertmanager](https://prometheus.io/docs/alerting/alertmanager/).
|
||||
* To notify the end-users of Argo CD about events like application upgrades, user errors in application definition, etc use one of the following projects:
|
||||
* [ArgoCD Notifications](https://github.com/argoproj-labs/argocd-notifications) - Argo CD specific notification system that continuously monitors Argo CD applications
|
||||
and aims to integrate with various notification services such as Slack, SMTP, Telegram, Discord, etc.
|
||||
* [Argo Kube Notifier](https://github.com/argoproj-labs/argo-kube-notifier) - generic Kubernetes resource controller that allows monitoring any Kubernetes resource and sends a
|
||||
notification when the configured rule is met.
|
||||
    * [Kube Watch](https://github.com/bitnami-labs/kubewatch) - a Kubernetes watcher that can publish notifications to Slack/hipchat/mattermost/flock channels. It watches the
|
||||
cluster for resource changes and notifies them through webhooks.
|
||||
@@ -30,6 +30,13 @@ spec:
|
||||
- group: ''
|
||||
kind: NetworkPolicy
|
||||
|
||||
# Deny all namespaced-scoped resources from being created, except for Deployment and StatefulSet
|
||||
  namespaceResourceWhilelist:
  # NOTE(review): field name above appears to be a typo of `namespaceResourceWhitelist` — confirm against the AppProject CRD
|
||||
- group: 'apps'
|
||||
kind: Deployment
|
||||
- group: 'apps'
|
||||
kind: StatefulSet
|
||||
|
||||
# Enables namespace orphaned resource monitoring.
|
||||
orphanedResources:
|
||||
warn: false
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
The RBAC feature enables restriction of access to Argo CD resources. Argo CD does not have its own
|
||||
user management system and has only one built-in user `admin`. The `admin` user is a superuser and
|
||||
it has unrestricted access to the system. RBAC requires [SSO configuration](sso/index.md). Once SSO is
|
||||
configured, additional RBAC roles can be defined, and SSO groups can be mapped to roles.
|
||||
it has unrestricted access to the system. RBAC requires [SSO configuration](user-management/index.md) or [one or more local users setup](user-management/index.md).
|
||||
Once SSO or local users are configured, additional RBAC roles can be defined, and SSO groups or local users can be mapped to roles.
|
||||
|
||||
## Basic Built-in Roles
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ Argo CD is un-opinionated about how secrets are managed. There's many ways to do
|
||||
* [Bitnami Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets)
|
||||
* [Godaddy Kubernetes External Secrets](https://github.com/godaddy/kubernetes-external-secrets)
|
||||
* [Hashicorp Vault](https://www.vaultproject.io)
|
||||
* [Banzai Cloud Bank-Vaults](https://github.com/banzaicloud/bank-vaults)
|
||||
* [Helm Secrets](https://github.com/futuresimple/helm-secrets)
|
||||
* [Kustomize secret generator plugins](https://github.com/kubernetes-sigs/kustomize/blob/fd7a353df6cece4629b8e8ad56b71e30636f38fc/examples/kvSourceGoPlugin.md#secret-values-from-anywhere)
|
||||
* [aws-secret-operator](https://github.com/mumoshu/aws-secret-operator)
|
||||
|
||||
@@ -153,11 +153,3 @@ Payloads from webhook events are considered untrusted. Argo CD only examines the
|
||||
the involved applications of the webhook event (e.g. which repo was modified), then refreshes
|
||||
the related application for reconciliation. This refresh is the same refresh which occurs regularly
|
||||
at three minute intervals, just fast-tracked by the webhook event.
|
||||
|
||||
## Reporting Vulnerabilities
|
||||
|
||||
Please report security vulnerabilities by e-mailing:
|
||||
|
||||
* [Jesse_Suen@intuit.com](mailto:Jesse_Suen@intuit.com)
|
||||
* [Alexander_Matyushentsev@intuit.com](mailto:Alexander_Matyushentsev@intuit.com)
|
||||
* [Edward_Lee@intuit.com](mailto:Edward_Lee@intuit.com)
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
# Auth0
|
||||
|
||||
!!! note "Are you using this? Please contribute!"
|
||||
If you're using this IdP please consider [contributing](../../developer-guide/site.md) to this document.
|
||||
|
||||
<!-- markdownlint-disable MD033 -->
|
||||
<div style="text-align:center"><img src="../../../assets/argo.png" /></div>
|
||||
<!-- markdownlint-enable MD033 -->
|
||||
86
docs/operator-manual/troubleshooting.md
Normal file
86
docs/operator-manual/troubleshooting.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# Troubleshooting Tools
|
||||
|
||||
This document describes how to use the `argocd-util` binary to simplify Argo CD settings customizations and troubleshoot
|
||||
connectivity issues.
|
||||
|
||||
## Settings
|
||||
|
||||
Argo CD provides multiple ways to customize system behavior and has a lot of settings. It might be dangerous to modify
|
||||
settings on Argo CD used in production by multiple users. Before applying settings you can use `argocd-util` binary to
|
||||
make sure that settings are valid and Argo CD is working as expected. The `argocd-util` binary is available in `argocd`
|
||||
image and might be used using docker. Example:
|
||||
|
||||
```bash
|
||||
docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd:<version> \
|
||||
argocd-util settings validate --argocd-cm-path ./argocd-cm.yaml
|
||||
```
|
||||
|
||||
If you are using Linux you can extract `argocd-util` binary from docker image:
|
||||
|
||||
```bash
|
||||
docker run --rm -it -w /src -v $(pwd):/src argocd cp /usr/local/bin/argocd-util ./argocd-util
|
||||
```
|
||||
|
||||
The `argocd-util settings validate` command performs basic settings validation and prints a short summary
|
||||
of each settings group.
|
||||
|
||||
**Diffing Customization**
|
||||
|
||||
[Diffing customization](../user-guide/diffing.md) allows excluding some resource fields from diffing process.
|
||||
The diffing customizations are configured in `resource.customizations` field of `argocd-cm` ConfigMap.
|
||||
|
||||
The following `argocd-util` command prints information about fields excluded from diffing in the specified ConfigMap.
|
||||
|
||||
```bash
|
||||
docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd:<version> \
|
||||
argocd-util settings resource-overrides ignore-differences ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml
|
||||
```
|
||||
|
||||
* Health Assessment
|
||||
|
||||
[Health assessment](./health.md) allows customizing how the health of a resource is assessed using a Lua script.
|
||||
The diffing customizations are configured in `resource.customizations` field of `argocd-cm` ConfigMap.
|
||||
|
||||
The following `argocd-util` command assess resource health using Lua script configured in the specified ConfigMap.
|
||||
|
||||
```bash
|
||||
docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd:<version> \
|
||||
argocd-util settings resource-overrides health ./deploy.yaml --argocd-cm-path ./argocd-cm.yaml
|
||||
```
|
||||
|
||||
* Resource Actions
|
||||
|
||||
Resource actions allows configuring named Lua script which performs resource modification.
|
||||
|
||||
The following `argocd-util` command executes action using Lua script configured in the specified ConfigMap and prints
|
||||
applied modifications.
|
||||
|
||||
```bash
|
||||
docker run --rm -it -w /src -v $(pwd):/src argoproj/argocd:<version> \
|
||||
argocd-util settings resource-overrides action /tmp/deploy.yaml restart --argocd-cm-path /private/tmp/argocd-cm.yaml
|
||||
```
|
||||
|
||||
## Cluster credentials
|
||||
|
||||
The `argocd-util kubeconfig` command is useful if you manually created a Secret with cluster credentials and need to
|
||||
troubleshoot connectivity issues. In this case, it is suggested to use the following steps:
|
||||
|
||||
1. SSH into the [argocd-application-controller] pod.
|
||||
|
||||
```
|
||||
kubectl exec -n argocd -it \
|
||||
$(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-application-controller -o jsonpath='{.items[0].metadata.name}') bash
|
||||
```
|
||||
|
||||
2. Use the `argocd-util kubeconfig` command to export a kubeconfig file from the configured Secret:
|
||||
|
||||
```
|
||||
argocd-util kubeconfig https://<api-server-url> /tmp/kubeconfig --namespace argocd
|
||||
```
|
||||
|
||||
3. Use `kubectl` to get more details about connection issues, fix them and apply the changes back to the Secret:
|
||||
|
||||
```
|
||||
export KUBECONFIG=/tmp/kubeconfig
|
||||
kubectl get pods -v 9
|
||||
```
|
||||
6
docs/operator-manual/upgrading/1.0-1.1.md
Normal file
6
docs/operator-manual/upgrading/1.0-1.1.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# v1.0 to 1.1
|
||||
|
||||
The v1.1 release does not introduce backward incompatible changes. Please note that Kustomize v1.0 is deprecated and
|
||||
support will be removed in the Argo CD v1.2 release.
|
||||
|
||||
From here on you can follow the [regular upgrade process](./overview.md).
|
||||
13
docs/operator-manual/upgrading/1.1-1.2.md
Normal file
13
docs/operator-manual/upgrading/1.1-1.2.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# v1.1 to 1.2
|
||||
|
||||
## Kustomize
|
||||
- Kustomize v1 support is removed. All kustomize charts are built using the same Kustomize version
|
||||
- Kustomize v2.0.3 upgraded to v3.1.0 . We've noticed one backward incompatible change: https://github.com/kubernetes-sigs/kustomize/issues/42 . Starting v2.1.0 namespace prefix feature works with CRD ( which might cause renaming of generated resource definitions)
|
||||
|
||||
|
||||
## ConfigMap labels
|
||||
|
||||
Argo CD config maps must be annotated with `app.kubernetes.io/part-of: argocd` label. Make sure to apply updated
|
||||
`install.yaml` manifest in addition to changing image version.
|
||||
|
||||
From here on you can follow the [regular upgrade process](./overview.md).
|
||||
8
docs/operator-manual/upgrading/1.2-1.3.md
Normal file
8
docs/operator-manual/upgrading/1.2-1.3.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# v1.2 to 1.3
|
||||
|
||||
# API Changes
|
||||
|
||||
The 1.3 release introduces backward incompatible changes in some public Argo CD APIs. Please make sure to upgrade
|
||||
Argo CD CLI to v1.3.
|
||||
|
||||
From here on you can follow the [regular upgrade process](./overview.md).
|
||||
15
docs/operator-manual/upgrading/1.3-1.4.md
Normal file
15
docs/operator-manual/upgrading/1.3-1.4.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# v1.3 to 1.4
|
||||
|
||||
## Sync Hooks
|
||||
|
||||
Argo CD deletes all **in-flight** hooks if you terminate a running sync operation. The hook state assessment change implemented in this release enables Argo CD to detect
|
||||
an in-flight state for all Kubernetes resources including `Deployment`, `PVC`, `StatefulSet`, `ReplicaSet` etc. So if you terminate the sync operation that has, for example,
|
||||
`StatefulSet` hook that is `Progressing` it will be deleted. The long-running jobs are not supposed to be used as a sync hook and you should consider using
|
||||
[Sync Waves](https://argoproj.github.io/argo-cd/user-guide/sync-waves/) instead.
|
||||
|
||||
From here on you can follow the [regular upgrade process](./overview.md).
|
||||
|
||||
# API Changes
|
||||
|
||||
The 1.3 release introduces backward incompatible changes in some public Argo CD APIs. Please make sure to upgrade
|
||||
Argo CD CLI to v1.3.
|
||||
15
docs/operator-manual/upgrading/1.4-1.5.md
Normal file
15
docs/operator-manual/upgrading/1.4-1.5.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# v1.4 to 1.5
|
||||
|
||||
## Updated prometheus metrics
|
||||
|
||||
The `argocd_app_sync_status`, `argocd_app_health_status` and `argocd_app_created_time` prometheus metrics are deprecated
|
||||
in favor of additional labels to the `argocd_app_info` metric. The deprecated labels are still available and can be re-enabled
|
||||
using `ARGOCD_LEGACY_CONTROLLER_METRICS=true` environment variable. The legacy example Grafana dashboard is available at
|
||||
[examples/dashboard-legacy.json](https://github.com/argoproj/argo-cd/blob/master/examples/dashboard-legacy.json).
|
||||
|
||||
## Redis HA Proxy
|
||||
|
||||
High-availability (HA) Argo CD manifests now bundle Redis with HAProxy in front of it. During the upgrade, Redis requests
|
||||
might fail and you might see intermittent login failures.
|
||||
|
||||
From here on you can follow the [regular upgrade process](./overview.md).
|
||||
43
docs/operator-manual/upgrading/overview.md
Normal file
43
docs/operator-manual/upgrading/overview.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Overview
|
||||
|
||||
!!!note
|
||||
|
||||
This section contains information on upgrading Argo CD. Before upgrading please make sure to read details about
|
||||
the breaking changes between Argo CD versions.
|
||||
|
||||
Argo CD uses semver versioning and ensures the following rules:
|
||||
|
||||
* The patch release does not introduce any breaking changes. So if you are upgrading from v1.5.1 to v1.5.3
|
||||
there should be no special instructions to follow.
|
||||
* The minor release might introduce minor changes with a workaround. If you are upgrading from v1.3.0 to v1.5.2
|
||||
please make sure to check upgrading details in both [v1.3 to v1.4](./1.3-1.4.md) and [v1.4 to v1.5](./1.4-1.5.md)
|
||||
upgrading instructions.
|
||||
* The major release introduces backward incompatible behavior changes. It is recommended to take a backup of
|
||||
Argo CD settings using disaster recovery [guide](../disaster_recovery.md).
|
||||
|
||||
After reading the relevant notes about possible breaking changes introduced in Argo CD version use the following
|
||||
command to upgrade Argo CD. Make sure to replace `<version>` with the required version number:
|
||||
|
||||
**Non-HA**:
|
||||
|
||||
```bash
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/<version>/manifests/install.yaml
|
||||
```
|
||||
|
||||
**HA**:
|
||||
```bash
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/<version>/manifests/ha/install.yaml
|
||||
```
|
||||
|
||||
!!! warning
|
||||
|
||||
Even though some releases require only image change it is still recommended to apply whole manifests set.
|
||||
Manifest changes might include important parameter modifications and applying the whole set will protect you from
|
||||
introducing misconfiguration.
|
||||
|
||||
<hr/>
|
||||
|
||||
* [v1.4 to v1.5](./1.4-1.5.md)
|
||||
* [v1.3 to v1.4](./1.3-1.4.md)
|
||||
* [v1.2 to v1.3](./1.2-1.3.md)
|
||||
* [v1.1 to v1.2](./1.1-1.2.md)
|
||||
66
docs/operator-manual/user-management/auth0.md
Normal file
66
docs/operator-manual/user-management/auth0.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Auth0
|
||||
|
||||
## User-definitions
|
||||
|
||||
User-definitions in Auth0 is out of scope for this guide. Add them directly in Auth0 database, use an enterprise registry, or "social login".
|
||||
*Note*: all users have access to all Auth0 defined apps unless you restrict access via configuration - keep this in mind if argo is exposed on the internet or else anyone can login.
|
||||
|
||||
## Registering the app with Auth0
|
||||
|
||||
Follow the [register app](https://auth0.com/docs/dashboard/guides/applications/register-app-spa) instructions to create the argocd app in Auth0. In the app definition:
|
||||
* Take note of the _clientId_ and _clientSecret_ values.
|
||||
* Register login url as https://your.argoingress.address/login
|
||||
* Set allowed callback url to https://your.argoingress.address/auth/callback
|
||||
* Under connections, select the user-registries you want to use with argo
|
||||
|
||||
Any other settings are non-essential for the authentication to work.
|
||||
|
||||
|
||||
## Adding authorization rules to Auth0
|
||||
|
||||
Follow Auth0 [authorization guide](https://auth0.com/docs/authorization) to setup authorization.
|
||||
The important part to note here is that group-membership is a non-standard claim, and hence is required to be put under a FQDN claim name, for instance `http://your.domain/groups`.
|
||||
|
||||
## Configuring argo
|
||||
|
||||
|
||||
### Configure OIDC for ArgoCD
|
||||
|
||||
`kubectl edit configmap argocd-cm`
|
||||
|
||||
```
|
||||
...
|
||||
data:
|
||||
application.instanceLabelKey: argocd.argoproj.io/instance
|
||||
oidc.config: |
|
||||
name: Auth0
|
||||
issuer: https://<yourtenant>.<eu|us>.auth0.com/
|
||||
clientID: <theClientId>
|
||||
clientSecret: <theClientSecret>
|
||||
requestedScopes:
|
||||
- openid
|
||||
- profile
|
||||
- email
|
||||
      # not strictly necessary - but good practice:
|
||||
- 'http://your.domain/groups'
|
||||
...
|
||||
```
|
||||
|
||||
|
||||
### Configure RBAC for ArgoCD
|
||||
|
||||
`kubectl edit configmap argocd-rbac-cm` (or use helm values).
|
||||
```
|
||||
...
|
||||
data:
|
||||
policy.csv: |
|
||||
# let members with group someProjectGroup handle apps in someProject
|
||||
# this can also be defined in the UI in the group-definition to avoid doing it there in the configmap
|
||||
p, someProjectGroup, applications, *, someProject/*, allow
|
||||
# let the group membership argocd-admins from OIDC become role:admin - needs to go into the configmap
|
||||
g, argocd-global-admins, role:admin
|
||||
policy.default: role:readonly
|
||||
# essential to get argo to use groups for RBAC:
|
||||
scopes: '[http://your.domain/groups, email]'
|
||||
...
|
||||
```
|
||||
@@ -1,7 +1,117 @@
|
||||
# SSO Overview
|
||||
# Overview
|
||||
|
||||
Argo CD does not have any local users other than the built-in `admin` user. All other users are
|
||||
expected to login via SSO. There are two ways that SSO can be configured:
|
||||
Once installed Argo CD has one built-in `admin` user that has full access to the system. It is recommended to use `admin` user only
|
||||
for initial configuration and then switch to local users or configure SSO integration.
|
||||
|
||||
## Local users/accounts (v1.5)
|
||||
|
||||
The local users/accounts feature serves two main use-cases:
|
||||
|
||||
* Auth tokens for Argo CD management automation. It is possible to configure an API account with limited permissions and generate an authentication token.
|
||||
Such token can be used to automatically create applications, projects etc.
|
||||
* Additional users for a very small team when SSO integration is overkill. The local users don't provide advanced features such as groups,
|
||||
login history etc. So if you need such features it is strongly recommended to use SSO.
|
||||
|
||||
!!! warning "Make sure to read about security limitations related to local users in [security considerations](../../security_considerations.md) document"
|
||||
|
||||
!!! note
|
||||
When you create local users, each of those users will need additional [RBAC rules](../rbac.md) set up, otherwise they will fall back to the default policy specified by `policy.default` field of the `argocd-rbac-cm` ConfigMap.
|
||||
|
||||
The maximum length of a local account's username is 32.
|
||||
|
||||
### Create new user
|
||||
|
||||
New users should be defined in `argocd-cm` ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
namespace: argocd
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-cm
|
||||
app.kubernetes.io/part-of: argocd
|
||||
data:
|
||||
# add an additional local user with apiKey and login capabilities
|
||||
# apiKey - allows generating API keys
|
||||
# login - allows to login using UI
|
||||
accounts.alice: apiKey, login
|
||||
# disables user. User is enabled by default
|
||||
accounts.alice.enabled: "false"
|
||||
```
|
||||
|
||||
Each user might have two capabilities:
|
||||
* apiKey - allows generating authentication tokens for API access
|
||||
* login - allows to login using UI
|
||||
|
||||
### Disable admin user
|
||||
|
||||
As soon as additional users are created it is recommended to disable `admin` user:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
namespace: argocd
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-cm
|
||||
app.kubernetes.io/part-of: argocd
|
||||
data:
|
||||
admin.enabled: "false"
|
||||
```
|
||||
|
||||
### Manage users
|
||||
|
||||
The Argo CD CLI provides set of commands to set user password and generate tokens.
|
||||
|
||||
* Get full users list
|
||||
```bash
|
||||
argocd account list
|
||||
```
|
||||
|
||||
* Get specific user details
|
||||
```bash
|
||||
argocd account get <username>
|
||||
```
|
||||
|
||||
* Set user password
|
||||
```bash
|
||||
argocd account update-password \
|
||||
--account <name> \
|
||||
--current-password <current-admin> \
|
||||
--new-password <new-user-password>
|
||||
```
|
||||
|
||||
* Generate auth token
|
||||
```bash
|
||||
# if flag --account is omitted then Argo CD generates token for current user
|
||||
argocd account generate-token --account <username>
|
||||
```
|
||||
|
||||
### Failed logins rate limiting
|
||||
|
||||
Argo CD rejects login attempts after too many failures in order to prevent password brute-forcing.
|
||||
The following environment variables are available to control throttling settings:
|
||||
|
||||
* `ARGOCD_SESSION_MAX_FAIL_COUNT`: Maximum number of failed logins before Argo CD starts
|
||||
rejecting login attempts. Default: 5.
|
||||
|
||||
* `ARGOCD_SESSION_FAILURE_WINDOW_SECONDS`: Number of seconds for the failure window.
|
||||
Default: 300 (5 minutes). If this is set to 0, the failure window is
|
||||
disabled and the login attempts gets rejected after 10 consecutive logon failures,
|
||||
regardless of the time frame they happened.
|
||||
|
||||
* `ARGOCD_SESSION_MAX_CACHE_SIZE`: Maximum number of entries allowed in the
|
||||
cache. Default: 1000
|
||||
|
||||
* `ARGOCD_MAX_CONCURRENT_LOGIN_REQUESTS_COUNT`: Limits max number of concurrent login requests.
|
||||
If set to 0 then limit is disabled. Default: 50.
|
||||
|
||||
## SSO
|
||||
|
||||
There are two ways that SSO can be configured:
|
||||
|
||||
* [Bundled Dex OIDC provider](#dex) - use this option if your current provider does not support OIDC (e.g. SAML,
|
||||
LDAP) or if you wish to leverage any of Dex's connector features (e.g. the ability to map GitHub
|
||||
@@ -19,7 +19,17 @@
|
||||
Directory (tenant) ID: 33333333-dddd-4444-eeee-555555555555
|
||||
Secret: some_secret
|
||||
|
||||
2. Edit `argocd-cm` and configure the `data.oidc.config` section:
|
||||
2. Setup permissions for Azure AD Application
|
||||
|
||||
On "API permissions" page find `User.Read` permission (under `Microsoft Graph`) and grant it to the created application:
|
||||
|
||||

|
||||
|
||||
Also, on "Token Configuration" page add groups claim for the groups assigned to the application:
|
||||
|
||||

|
||||
|
||||
3. Edit `argocd-cm` and configure the `data.oidc.config` section:
|
||||
|
||||
ConfigMap -> argocd-cm
|
||||
|
||||
@@ -27,18 +37,25 @@
|
||||
url: https://argocd.example.com/
|
||||
oidc.config: |
|
||||
name: Azure
|
||||
issuer: https://sts.windows.net/{directory_tenant_id}/
|
||||
issuer: https://login.microsoftonline.com/{directory_tenant_id}/v2.0
|
||||
clientID: {azure_ad_application_client_id}
|
||||
clientSecret: $oidc.azure.clientSecret
|
||||
requestedIDTokenClaims:
|
||||
groups:
|
||||
essential: true
|
||||
requestedScopes:
|
||||
- openid
|
||||
- profile
|
||||
- email
|
||||
|
||||
3. Edit `argocd-secret` and configure the `data.oidc.azure.clientSecret` section:
|
||||
4. Edit `argocd-secret` and configure the `data.oidc.azure.clientSecret` section:
|
||||
|
||||
Secret -> argocd-secret
|
||||
|
||||
data:
|
||||
oidc.azure.clientSecret: {client_secret | base64_encoded}
|
||||
|
||||
4. Edit `argocd-rbac-cm` to configure permissions
|
||||
5. Edit `argocd-rbac-cm` to configure permissions. Use group ID from Azure for assigning roles
|
||||
|
||||
[RBAC Configurations](../rbac.md)
|
||||
|
||||
@@ -52,8 +69,13 @@
|
||||
p, role:org-admin, repositories, create, *, allow
|
||||
p, role:org-admin, repositories, update, *, allow
|
||||
p, role:org-admin, repositories, delete, *, allow
|
||||
g, "Grp Argo CD", role:org-admin
|
||||
g, "84ce98d1-e359-4f3b-85af-985b458de3c6", role:org-admin
|
||||
|
||||
6. Mapping role from jwt token to argo
|
||||
|
||||
If you want to map the roles from the jwt token to match the default roles (readonly and admin) then you must change the scope variable in the rbac-configmap.
|
||||
|
||||
scopes: '[roles, email]'
|
||||
|
||||
## With Dex
|
||||
|
||||
@@ -17,7 +17,7 @@ A working Single Sign-On configuration using Okta via at least two methods was a
|
||||
1. Click `View setup instructions` after creating the application in Okta.
|
||||
* 
|
||||
1. Copy the SSO URL to the `argocd-cm` in the data.oicd
|
||||
1. Download the CA certificate to use in the `argocd-cm` configuration.
|
||||
1. Download the CA certificate to use in the `argocd-cm` configuration. If you are using this in the caData field, you will need to pass the entire certificate (including `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` stanzas) through base64 encoding, for example, `base64 my_cert.pem`.
|
||||
* 
|
||||
1. Edit the `argocd-cm` and configure the `data.dex.config` section:
|
||||
|
||||
@@ -35,7 +35,7 @@ dex.config: |
|
||||
ssoURL: https://yourorganization.oktapreview.com/app/yourorganizationsandbox_appnamesaml_2/rghdr9s6hg98s9dse/sso/saml
|
||||
# You need `caData` _OR_ `ca`, but not both.
|
||||
caData: |
|
||||
<base64 encoded CA cert>
|
||||
<CA cert passed through base64 encoding>
|
||||
# You need `caData` _OR_ `ca`, but not both.
|
||||
ca: /path/to/ca.pem
|
||||
redirectURI: https://ui.argocd.yourorganization.net/api/dex/callback
|
||||
@@ -74,7 +74,7 @@ oidc.config: |
|
||||
issuer: https://yourorganization.oktapreview.com
|
||||
clientID: 0oaltaqg3oAIf2NOa0h3
|
||||
clientSecret: ZXF_CfUc-rtwNfzFecGquzdeJ_MxM4sGc8pDT2Tg6t
|
||||
requestedScopes: ["openid", "profile", "email", "groups"].
|
||||
requestedScopes: ["openid", "profile", "email", "groups"]
|
||||
requestedIDTokenClaims: {"groups": {"essential": true}}
|
||||
```
|
||||
<!-- markdownlint-enable MD046 -->
|
||||
@@ -16,7 +16,7 @@ arbitrary value in the secret. This value will be used when configuring the webh
|
||||
|
||||

|
||||
|
||||
### 2. Configure Argo CD With The WebHook Secret Optional)
|
||||
### 2. Configure Argo CD With The WebHook Secret (Optional)
|
||||
|
||||
Configuring a webhook shared secret is optional, since Argo CD will still refresh applications
|
||||
related to the Git repository, even with unauthenticated webhook events. This is safe to do since
|
||||
|
||||
167
docs/security_considerations.md
Normal file
167
docs/security_considerations.md
Normal file
@@ -0,0 +1,167 @@
|
||||
# Security Considerations
|
||||
|
||||
As a deployment tool, Argo CD needs to have production access which makes security a very important topic.
|
||||
The Argoproj team takes security very seriously and is continuously working on improving it. Learn more about security
|
||||
related features in [Security](./operator-manual/security.md) section.
|
||||
|
||||
## Overview of past and current issues
|
||||
|
||||
The following table gives a general overview about past and present issues known
|
||||
to the ArgoCD project. See in the [Known Issues](#known-issues-and-workarounds)
|
||||
section if there is a work-around available if you cannot update or if there is
|
||||
no fix yet.
|
||||
|
||||
|Date|CVE|Title|Risk|Affected version(s)|Fix version|
|
||||
|----|---|-----|----|-------------------|-----------|
|
||||
|2020-04-14|[CVE-2020-5260](https://nvd.nist.gov/vuln/detail/CVE-2020-5260)|Possible Git credential leak|Critical|all|v1.4.3,v1.5.2|
|
||||
|2020-04-08|[CVE-2020-11576](https://nvd.nist.gov/vuln/detail/CVE-2020-11576)|User Enumeration|Medium|v1.5.0|v1.5.1|
|
||||
|2020-04-08|[CVE-2020-8826](https://nvd.nist.gov/vuln/detail/CVE-2020-8826)|Session-fixation|High|all|n/a|
|
||||
|2020-04-08|[CVE-2020-8827](https://nvd.nist.gov/vuln/detail/CVE-2020-8827)|Insufficient anti-automation/anti-brute force|High|all|n/a|
|
||||
|2020-04-08|[CVE-2020-8828](https://nvd.nist.gov/vuln/detail/CVE-2020-8828)|Insecure default administrative password|High|all|n/a|
|
||||
|2020-04-08|[CVE-2018-21034](https://nvd.nist.gov/vuln/detail/CVE-2018-21034)|Sensitive Information Disclosure|Medium|all <= v1.5.0|v1.5.0|
|
||||
|
||||
## Known Issues And Workarounds
|
||||
|
||||
A recent security audit (thanks a lot to [Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai) )
|
||||
has revealed several limitations in Argo CD which could compromise security.
|
||||
Most of the issues are related to the built-in user management implementation.
|
||||
|
||||
### CVE-2020-5260 - Possible Git credential leak
|
||||
|
||||
**Summary:**
|
||||
|
||||
|Risk|Reported by|Fix version|Workaround|
|
||||
|----|-----------|-----------|----------|
|
||||
|Critical|Felix Wilhelm of Google Project Zero|v1.4.3,v1.5.2|Yes|
|
||||
|
||||
**Details:**
|
||||
|
||||
ArgoCD relies on Git for many of its operations. The Git project released a
|
||||
[security advisory](https://github.com/git/git/security/advisories/GHSA-qm7j-c969-7j4q)
|
||||
on 2020-04-14, describing a serious vulnerability in Git which can lead to credential
|
||||
leakage through credential helpers by feeding malicious URLs to the `git clone`
|
||||
operation.
|
||||
|
||||
We do not believe ArgoCD is affected by this vulnerability, because ArgoCD neither
|
||||
makes use of Git credential helpers nor uses `git clone` for repository operations.
|
||||
However, since we cannot know whether our users might have configured Git credential helpers on
|
||||
their own, we chose to release new images which contain the bug fix for Git.
|
||||
|
||||
**Mitigation and/or workaround:**
|
||||
|
||||
We strongly recommend to upgrade your ArgoCD installation to either `v1.4.3` (if on v1.4
|
||||
branch) or `v1.5.2` (if on v1.5 branch)
|
||||
|
||||
|
||||
When you are running `v1.4.x`, you can upgrade to `v1.4.3` by simply changing the image
|
||||
tags for `argocd-server`, `argocd-repo-server` and `argocd-controller` to `v1.4.3`.
|
||||
The `v1.4.3` release does not contain additional functional bug fixes.
|
||||
|
||||
Likewise, when you are running `v1.5.x`, you can upgrade to `v1.5.2` by simply changing
|
||||
the image tags for `argocd-server`, `argocd-repo-server` and `argocd-controller` to `v1.5.2`.
|
||||
The `v1.5.2` release does not contain additional functional bug fixes.
|
||||
|
||||
### CVE-2020-11576 - User Enumeration
|
||||
|
||||
**Summary:**
|
||||
|
||||
|Risk|Reported by|Fix version|Workaround|
|
||||
|----|-----------|-----------|----------|
|
||||
|Medium|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|v1.5.1|Yes|
|
||||
|
||||
**Details:**
|
||||
|
||||
Argo version v1.5.0 was vulnerable to a user-enumeration vulnerability which allowed attackers to determine the usernames of valid (non-SSO) accounts within Argo.
|
||||
|
||||
**Mitigation and/or workaround:**
|
||||
|
||||
Upgrade to ArgoCD v1.5.1 or higher. As a workaround, disable local users and use only SSO authentication.
|
||||
|
||||
### CVE-2020-8828 - Insecure default administrative password
|
||||
|
||||
**Summary:**
|
||||
|
||||
|Risk|Reported by|Fix version|Workaround|
|
||||
|----|-----------|-----------|----------|
|
||||
|High|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|n/a|Yes|
|
||||
|
||||
**Details:**
|
||||
|
||||
Argo CD uses the `argocd-server` pod name (ex: `argocd-server-55594fbdb9-ptsf5`) as the default admin password.
|
||||
|
||||
Kubernetes users able to list pods in the argo namespace are able to retrieve the default password.
|
||||
|
||||
Additionally, in most installations, [the Pod name contains a random "trail" of characters](https://github.com/kubernetes/kubernetes/blob/dda530cfb74b157f1d17b97818aa128a9db8e711/staging/src/k8s.io/apiserver/pkg/storage/names/generate.go#L37).
|
||||
These characters are generated using [a time-seeded PRNG](https://github.com/kubernetes/apimachinery/blob/master/pkg/util/rand/rand.go#L26) and not a CSPRNG.
|
||||
An attacker could use this information in an attempt to deduce the state of the internal PRNG, aiding bruteforce attacks.
|
||||
|
||||
**Mitigation and/or workaround:**
|
||||
|
||||
The recommended mitigation as described in the user documentation is to use SSO integration. The default admin password
|
||||
should only be used for initial configuration and then [disabled](https://argoproj.github.io/argo-cd/operator-manual/user-management/#disable-admin-user)
|
||||
or at least changed to a more secure password.
|
||||
|
||||
### CVE-2020-8827 - Insufficient anti-automation/anti-brute force
|
||||
|
||||
**Summary:**
|
||||
|
||||
|Risk|Reported by|Fix version|Workaround|
|
||||
|----|-----------|-----------|----------|
|
||||
|High|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|n/a|Yes|
|
||||
|
||||
**Details:**
|
||||
|
||||
Argo CD does not enforce rate-limiting or other anti-automation mechanisms which would mitigate admin password brute force.
|
||||
|
||||
We are considering some simple options for rate-limiting.
|
||||
|
||||
**Mitigation and/or workaround:**
|
||||
|
||||
As a workaround for mitigation until a fix is available, we recommend to disable local users and use SSO instead.
|
||||
|
||||
### CVE-2020-8826 - Session-fixation
|
||||
|
||||
**Summary:**
|
||||
|
||||
|Risk|Reported by|Fix version|Workaround|
|
||||
|----|-----------|-----------|----------|
|
||||
|High|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|n/a|Yes|
|
||||
|
||||
**Details:**
|
||||
|
||||
The authentication tokens generated for built-in users have no expiry.
|
||||
|
||||
These issues might be acceptable in a controlled, isolated environment but are not acceptable if the Argo CD user interface is
|
||||
exposed to the Internet.
|
||||
|
||||
**Mitigation and/or workaround:**
|
||||
|
||||
The recommended mitigation is to change the password periodically to invalidate the authentication tokens.
|
||||
|
||||
### CVE-2018-21034 - Sensitive Information Disclosure
|
||||
|
||||
**Summary:**
|
||||
|
||||
|Risk|Reported by|Fix version|Workaround|
|
||||
|----|-----------|-----------|----------|
|
||||
|Medium|[Matt Hamilton](https://github.com/Eriner) of [https://soluble.ai](https://soluble.ai)|v1.5.0|No|
|
||||
|
||||
**Details:**
|
||||
|
||||
In Argo versions prior to v1.5.0-rc1, it was possible for authenticated Argo users to submit API calls to retrieve secrets and other manifests which were stored within git.
|
||||
|
||||
**Mitigation and/or workaround:**
|
||||
|
||||
Upgrade to ArgoCD v1.5.0 or higher. No workaround is available.
|
||||
|
||||
## Reporting Vulnerabilities
|
||||
|
||||
If you find a security related bug in ArgoCD, we kindly ask you for responsible
|
||||
disclosure and for giving us appropriate time to react, analyze and develop a
|
||||
fix to mitigate the found security vulnerability.
|
||||
|
||||
Please report security vulnerabilities by e-mailing:
|
||||
|
||||
* [Jesse_Suen@intuit.com](mailto:Jesse_Suen@intuit.com)
|
||||
* [Alexander_Matyushentsev@intuit.com](mailto:Alexander_Matyushentsev@intuit.com)
|
||||
* [Edward_Lee@intuit.com](mailto:Edward_Lee@intuit.com)
|
||||
@@ -7,7 +7,7 @@ Apps can be deleted with or without a cascade option. A **cascade delete**, dele
|
||||
To perform a non-cascade delete:
|
||||
|
||||
```bash
|
||||
argocd app delete APPNAME
|
||||
argocd app delete APPNAME --cascade=false
|
||||
```
|
||||
|
||||
To perform a cascade delete:
|
||||
@@ -16,6 +16,12 @@ To perform a cascade delete:
|
||||
argocd app delete APPNAME --cascade
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
argocd app delete APPNAME
|
||||
```
|
||||
|
||||
# Deletion Using `kubectl`
|
||||
|
||||
To perform a non-cascade delete:
|
||||
|
||||
@@ -37,6 +37,22 @@ spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
```
|
||||
|
||||
## Automatic Self-Healing
|
||||
By default, changes that are made to the live cluster will not trigger automated sync. To enable automatic sync
|
||||
when the live cluster's state deviates from the state defined in Git, run:
|
||||
|
||||
```bash
|
||||
argocd app set <APPNAME> --self-heal
|
||||
```
|
||||
|
||||
Or by setting the self heal option to true in the automated sync policy:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
```
|
||||
|
||||
@@ -48,7 +64,7 @@ spec:
|
||||
application parameters. If the most recent successful sync in the history was already performed
|
||||
against the same commit-SHA and parameters, a second sync will not be attempted, unless `selfHeal` flag is set to true.
|
||||
* If `selfHeal` flag is set to true then sync will be attempted again after self heal timeout (5 seconds by default)
|
||||
which is controller by `--self-heal-timeout-seconds` flag of `argocd-application-controller` deployment.
|
||||
which is controlled by `--self-heal-timeout-seconds` flag of `argocd-application-controller` deployment.
|
||||
* Automatic sync will not reattempt a sync if the previous sync attempt against the same commit-SHA
|
||||
and parameters had failed.
|
||||
|
||||
|
||||
@@ -67,8 +67,8 @@ bases:
|
||||
- github.com/argoproj/argo-cd//manifests/cluster-install
|
||||
```
|
||||
|
||||
The above kustomization has a remote base to he HEAD revision of the argo-cd repo. Since this
|
||||
is not stable target, the manifests for this kustomize application can suddenly change meaning, even without
|
||||
The above kustomization has a remote base to the HEAD revision of the argo-cd repo. Since this
|
||||
is not a stable target, the manifests for this kustomize application can suddenly change meaning, even without
|
||||
any changes to your own Git repository.
|
||||
|
||||
A better version would be to use a Git tag or commit SHA. For example:
|
||||
|
||||
@@ -9,4 +9,6 @@
|
||||
* `ARGOCD_APP_REVISION` - the resolved revision, e.g. `f913b6cbf58aa5ae5ca1f8a2b149477aebcbd9d8`
|
||||
* `ARGOCD_APP_SOURCE_PATH` - the path of the app within the repo
|
||||
* `ARGOCD_APP_SOURCE_REPO_URL` - the repo's URL
|
||||
* `ARGOCD_APP_SOURCE_TARGET_REVISION` - the target revision from the spec, e.g. `master`.
|
||||
* `ARGOCD_APP_SOURCE_TARGET_REVISION` - the target revision from the spec, e.g. `master`.
|
||||
* `KUBE_VERSION` - the version of kubernetes
|
||||
* `KUBE_API_VERSIONS` - the version of the kubernetes API
|
||||
@@ -11,7 +11,7 @@ submitted to Kubernetes in a manner which contradicts Git.
|
||||
which generates different data every time `helm template` is invoked.
|
||||
* For Horizontal Pod Autoscaling (HPA) objects, the HPA controller is known to reorder `spec.metrics`
|
||||
in a specific order. See [kubernetes issue #74099](https://github.com/kubernetes/kubernetes/issues/74099).
|
||||
To work around this, you can order `spec.replicas` in Git in the same order that the controller
|
||||
To work around this, you can order `spec.metrics` in Git in the same order that the controller
|
||||
prefers.
|
||||
|
||||
In case it is impossible to fix the upstream issue, Argo CD allows you to optionally ignore differences of problematic resources.
|
||||
@@ -19,7 +19,7 @@ The diffing customization can be configured for single or multiple application r
|
||||
|
||||
## Application Level Configuration
|
||||
|
||||
Argo CD allows ignoring differences at a specific JSON path. The following sample application is configured to ignore differences in `spec.replicas` for all deployments:
|
||||
Argo CD allows ignoring differences at a specific JSON path, using [RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902). The following sample application is configured to ignore differences in `spec.replicas` for all deployments:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
@@ -57,3 +57,48 @@ data:
|
||||
jsonPointers:
|
||||
- /webhooks/0/clientConfig/caBundle
|
||||
```
|
||||
|
||||
## Known Kubernetes types in CRDs (Resource limits, Volume mounts etc)
|
||||
|
||||
Some CRDs are re-using data structures defined in the Kubernetes source base and therefore inheriting custom
|
||||
JSON/YAML marshaling. Custom marshalers might serialize CRDs in a slightly different format that causes false
|
||||
positives during drift detection.
|
||||
|
||||
A typical example is the `argoproj.io/Rollout` CRD that re-using `core/v1/PodSpec` data structure. Pod resource requests
|
||||
might be reformatted by the custom marshaller of `IntOrString` data type:
|
||||
|
||||
from:
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
```
|
||||
|
||||
to:
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
cpu: 0.1
|
||||
```
|
||||
|
||||
The solution is to specify which CRDs fields are using built-in Kubernetes types in the `resource.customizations`
|
||||
section of `argocd-cm` ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
namespace: argocd
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-cm
|
||||
app.kubernetes.io/part-of: argocd
|
||||
data:
|
||||
resource.customizations: |
|
||||
argoproj.io/Rollout:
|
||||
knownTypeFields:
|
||||
- field: spec.template.spec
|
||||
type: core/v1/PodSpec
|
||||
```
|
||||
|
||||
The list of supported Kubernetes types is available in [diffing_known_types.txt](https://raw.githubusercontent.com/argoproj/argo-cd/master/util/argo/normalizers/diffing_known_types.txt)
|
||||
|
||||
@@ -86,7 +86,7 @@ Unsupported hooks are ignored. In Argo CD, hooks are created by using `kubectl a
|
||||
* Annotate `pre-install` and `post-install` with `hook-weight: "-1"`. This will make sure it runs to success before any upgrade hooks.
|
||||
* Annotate `pre-upgrade` and `post-upgrade` with `hook-delete-policy: before-hook-creation` to make sure it runs on every sync.
|
||||
|
||||
Read more about [Argo hooks](resource_hooks.md) and [Helm hooks](https://github.com/kubernetes/helm/blob/master/docs/charts_hooks.md).
|
||||
Read more about [Argo hooks](resource_hooks.md) and [Helm hooks](https://github.com/helm/helm/blob/dev-v2/docs/charts_hooks.md).
|
||||
|
||||
## Random Data
|
||||
|
||||
@@ -118,7 +118,7 @@ argocd app set redis -p password=abc123
|
||||
|
||||
> v1.4
|
||||
|
||||
Helm apps have access to the [standard build environment](build-environment.md) via substitution as parameters.
|
||||
Helm apps have access to the [standard build environment](build-environment.md) via substitution as parameters.
|
||||
|
||||
E.g. via the CLI:
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user