Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 09:38:49 +01:00

Compare commits (123 commits): commit-ser... → v2.10.20
| SHA1 |
|---|
| 5498456fa8 |
| 7bd0c3669f |
| c9a7c0cd47 |
| a80f192b4f |
| 3d900c7084 |
| a3e235907a |
| 6e33cba80e |
| 1200b6c42d |
| f52dcf6f3c |
| 6b0060587d |
| 9865a8a340 |
| b3c2bc114b |
| 286568e73a |
| 5b041c2af2 |
| a1d375836e |
| c02a3d775c |
| 3bf801f2df |
| ef535230d8 |
| 14963d7fac |
| 46c0c0b64d |
| 794b2e050d |
| d8ddce87be |
| 7e99a1340e |
| 63a30293fe |
| 2fbb69b892 |
| 9c711337e7 |
| 2c2064be7b |
| 9d8148bc61 |
| 9ba6943111 |
| c79714d660 |
| cb6f5ac8b8 |
| ec35043a64 |
| 531123b70c |
| 27e49f8b78 |
| 82ae349929 |
| f61f47264f |
| 0a179fb98e |
| a960c6be07 |
| 0895ebc135 |
| 2de0ceade2 |
| bdd889d439 |
| d58c96b456 |
| 5425568bd1 |
| 320ced67b8 |
| f2d31330ff |
| fa7f330ab3 |
| 1466755aeb |
| 8d267c0136 |
| c071af8081 |
| 04785a4861 |
| 37b1cf5306 |
| 15865b9a04 |
| 47a35c1a11 |
| 9e5cc5a26f |
| 19addbd9bb |
| 744df40552 |
| b060053b09 |
| 696ca0a57f |
| c514105af7 |
| d504d2b1d9 |
| 5814864d6c |
| da65596511 |
| 73be9c4c2c |
| d124f1603e |
| 335875d13e |
| 4192e3f3ac |
| 3e5a878f6e |
| 47d586169f |
| f5d63a5c77 |
| ce04dc5c6f |
| cebb6538f7 |
| ab7e45da13 |
| a8ae929d55 |
| f3fdaa7eab |
| 0b4659c046 |
| 0fd6344537 |
| 0977f61554 |
| 3dd069b049 |
| 37da5e2ae5 |
| 12886657ac |
| fcf5d8c238 |
| 1ee3c80bc8 |
| a79fcad0e9 |
| 67e57a47a2 |
| d99ee9d28b |
| 28a9225e7b |
| f5d6b2972b |
| 06e2e0da9a |
| a79e0eaca4 |
| 65461a1b61 |
| 2268f08819 |
| a1a5c58a7d |
| 9c379af169 |
| 4e01115a48 |
| eddf0a5f30 |
| 2175939ed6 |
| 1c959b7b0d |
| dfba4098d1 |
| 1835210574 |
| b78befe441 |
| e932556758 |
| 28c29380d4 |
| 18f82913b6 |
| d9ece9295e |
| 0c62f6d6b9 |
| 14b8762684 |
| c6469aef7e |
| a848105a6d |
| b657e97448 |
| 00fae11d99 |
| c3d125f616 |
| 1041086231 |
| da10276c8d |
| 4d0f940e04 |
| 8b865d7e30 |
| a6d8c924ee |
| 3c5878ecf4 |
| 12f2252700 |
| 6f6a9a940b |
| 3ca67858f0 |
| 3f18c21c07 |
| 3ebcca66f3 |
| d9196060c2 |
14 .github/workflows/ci-build.yaml (vendored)

@@ -52,7 +52,7 @@ jobs:
 with:
 go-version: ${{ env.GOLANG_VERSION }}
 - name: Restore go build cache
-uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -117,7 +117,7 @@ jobs:
 run: |
 echo "/usr/local/bin" >> $GITHUB_PATH
 - name: Restore go build cache
-uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -184,7 +184,7 @@ jobs:
 run: |
 echo "/usr/local/bin" >> $GITHUB_PATH
 - name: Restore go build cache
-uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -270,7 +270,7 @@ jobs:
 node-version: '20.7.0'
 - name: Restore node dependency cache
 id: cache-dependencies
-uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: ui/node_modules
 key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -305,7 +305,7 @@ jobs:
 fetch-depth: 0
 - name: Restore node dependency cache
 id: cache-dependencies
-uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: ui/node_modules
 key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -401,7 +401,7 @@ jobs:
 sudo chmod go-r $HOME/.kube/config
 kubectl version
 - name: Restore go build cache
-uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
+uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
 with:
 path: ~/.cache/go-build
 key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -429,7 +429,7 @@ jobs:
 run: |
 docker pull ghcr.io/dexidp/dex:v2.37.0
 docker pull argoproj/argo-cd-ci-builder:v1.0.0
-docker pull redis:7.0.14-alpine
+docker pull redis:7.0.15-alpine
 - name: Create target directory for binaries in the build-process
 run: |
 mkdir -p dist
4 .github/workflows/image-reuse.yaml (vendored)

@@ -74,9 +74,7 @@ jobs:
 go-version: ${{ inputs.go-version }}

 - name: Install cosign
-uses: sigstore/cosign-installer@1fc5bd396d372bee37d608f955b336615edf79c8 # v3.2.0
-with:
-cosign-release: 'v2.2.1'
+uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0

 - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
 - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
2 .github/workflows/image.yaml (vendored)

@@ -86,7 +86,7 @@ jobs:
 packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
 if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
 # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
 with:
 image: ghcr.io/argoproj/argo-cd/argocd
 digest: ${{ needs.build-and-publish.outputs.image-digest }}
42 .github/workflows/release.yaml (vendored)

@@ -31,20 +31,20 @@ jobs:
 quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}

 argocd-image-provenance:
-needs: [argocd-image]
-permissions:
-actions: read # for detecting the Github Actions environment.
-id-token: write # for creating OIDC tokens for signing.
-packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
-# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-if: github.repository == 'argoproj/argo-cd'
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
-with:
-image: quay.io/argoproj/argocd
-digest: ${{ needs.argocd-image.outputs.image-digest }}
-secrets:
-registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
-registry-password: ${{ secrets.RELEASE_QUAY_TOKEN }}
+needs: [argocd-image]
+permissions:
+actions: read # for detecting the Github Actions environment.
+id-token: write # for creating OIDC tokens for signing.
+packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
+# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
+if: github.repository == 'argoproj/argo-cd'
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
+with:
+image: quay.io/argoproj/argocd
+digest: ${{ needs.argocd-image.outputs.image-digest }}
+secrets:
+registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
+registry-password: ${{ secrets.RELEASE_QUAY_TOKEN }}

 goreleaser:
 needs:
@@ -87,6 +87,14 @@ jobs:
 echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV
 echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

+- name: Free Disk Space (Ubuntu)
+uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
+with:
+large-packages: false
+docker-images: false
+swap-storage: false
+tool-cache: false
+
 - name: Run GoReleaser
 uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
 id: run-goreleaser
@@ -120,7 +128,7 @@ jobs:
 contents: write # Needed for release uploads
 if: github.repository == 'argoproj/argo-cd'
 # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
 with:
 base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
 provenance-name: "argocd-cli.intoto.jsonl"
@@ -203,8 +211,8 @@ jobs:
 id-token: write # Needed for provenance signing and ID
 contents: write # Needed for release uploads
 if: github.repository == 'argoproj/argo-cd'
-# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
+# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
 with:
 base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
 provenance-name: "argocd-sbom.intoto.jsonl"
@@ -114,7 +114,7 @@ changelog:
 exclude:
 - '^test:'
 - '^.*?Bump(\([[:word:]]+\))?.+$'
-- '^.*?[Bot](\([[:word:]]+\))?.+$'
+- '^.*?\[Bot\](\([[:word:]]+\))?.+$'

 # yaml-language-server: $schema=https://goreleaser.com/static/schema.json
@@ -4,9 +4,9 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fca
 # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
 # Also used as the image in CI jobs so needs all dependencies
 ####################################################################################################
-FROM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS builder
+FROM docker.io/library/golang:1.21.10@sha256:16438a8e66c0c984f732e815ee5b7d715b8e33e81bac6d6a3750b1067744e7ca AS builder

-RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
+RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list

 RUN apt-get update && apt-get install --no-install-recommends -y \
 openssh-server \
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
 ####################################################################################################
 # Argo CD Build stage which performs the actual build of Argo CD binaries
 ####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS argocd-build
+FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.10@sha256:16438a8e66c0c984f732e815ee5b7d715b8e33e81bac6d6a3750b1067744e7ca AS argocd-build

 WORKDIR /go/src/github.com/argoproj/argo-cd
2 Makefile

@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
 ARGOCD_E2E_YARN_HOST?=localhost
 ARGOCD_E2E_DISABLE_AUTH?=

-ARGOCD_E2E_TEST_TIMEOUT?=60m
+ARGOCD_E2E_TEST_TIMEOUT?=90m

 ARGOCD_IN_CI?=false
 ARGOCD_TEST_E2E?=true
2 Procfile

@@ -1,4 +1,4 @@
-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
+controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
 api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
 dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
 redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
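The only functional change in the Procfile hunk above is the HOSTNAME=testappcontroller-1 prefix: when the application controller runs sharded, it infers its shard from a StatefulSet-style hostname ordinal, and an arbitrary local machine hostname would make that inference fail. A minimal sketch of the idea; the helper below is hypothetical, and Argo CD's sharding.InferShard differs in detail:

```go
// Hypothetical sketch of inferring a shard index from a StatefulSet-style
// hostname such as "testappcontroller-1".
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func inferShard() (int, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return -1, err
	}
	// The shard is the numeric ordinal after the last "-" in the pod name.
	idx := strings.LastIndex(hostname, "-")
	if idx < 0 {
		return -1, fmt.Errorf("hostname %q has no shard ordinal suffix", hostname)
	}
	shard, err := strconv.Atoi(hostname[idx+1:])
	if err != nil {
		return -1, fmt.Errorf("hostname %q: %w", hostname, err)
	}
	return shard, nil
}

func main() {
	shard, err := inferShard()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("processing clusters from shard", shard)
}
```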
2 USERS.md

@@ -40,6 +40,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [Boozt](https://www.booztgroup.com/)
 1. [Boticario](https://www.boticario.com.br/)
 1. [Bulder Bank](https://bulderbank.no)
+1. [CAM](https://cam-inc.co.jp)
 1. [Camptocamp](https://camptocamp.com)
 1. [Candis](https://www.candis.io)
 1. [Capital One](https://www.capitalone.com)
@@ -233,6 +234,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [QuintoAndar](https://quintoandar.com.br)
 1. [Quipper](https://www.quipper.com/)
 1. [RapidAPI](https://www.rapidapi.com/)
 1. [rebuy](https://www.rebuy.de/)
+1. [Recreation.gov](https://www.recreation.gov/)
 1. [Red Hat](https://www.redhat.com/)
 1. [Redpill Linpro](https://www.redpill-linpro.com/)
@@ -50,6 +50,7 @@ import (
 argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
 argoutil "github.com/argoproj/argo-cd/v2/util/argo"
+"github.com/argoproj/argo-cd/v2/util/argo/normalizers"

 "github.com/argoproj/argo-cd/v2/pkg/apis/application"
 )
@@ -666,7 +667,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
 },
 }

-action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, found, func() error {
+action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, normalizers.IgnoreNormalizerOpts{}, found, func() error {
 // Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
 found.Spec = generatedApp.Spec

@@ -20,6 +20,7 @@ import (
 argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 "github.com/argoproj/argo-cd/v2/util/argo"
 argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
+"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
 )

 // CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
@@ -35,7 +36,7 @@ import (
 // The MutateFn is called regardless of creating or updating an object.
 //
 // It returns the executed operation and an error.
-func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
+func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {

 key := client.ObjectKeyFromObject(obj)
 if err := c.Get(ctx, key, obj); err != nil {
@@ -60,7 +61,7 @@ func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ign

 // Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
 // prevents those differences from appearing in the diff and therefore in the patch.
-err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj)
+err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj, ignoreNormalizerOpts)
 if err != nil {
 return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
 }
@@ -134,14 +135,14 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
 }

 // applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
-func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application) error {
+func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) error {
 if len(applicationSetIgnoreDifferences) == 0 {
 return nil
 }

 generatedAppCopy := generatedApp.DeepCopy()
 diffConfig, err := argodiff.NewDiffConfigBuilder().
-WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
+WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false, ignoreNormalizerOpts).
 WithNoCache().
 Build()
 if err != nil {
@@ -9,6 +9,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
 )

 func Test_applyIgnoreDifferences(t *testing.T) {
@@ -222,7 +223,7 @@ spec:
 generatedApp := v1alpha1.Application{TypeMeta: appMeta}
 err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
 require.NoError(t, err, tc.generatedApp)
-err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp)
+err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp, normalizers.IgnoreNormalizerOpts{})
 require.NoError(t, err)
 yamlFound, err := yaml.Marshal(tc.foundApp)
 require.NoError(t, err)
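The recurring theme in the hunks above is threading a new normalizers.IgnoreNormalizerOpts value through CreateOrUpdate, applyIgnoreDifferences, and the diff config builder; judging by the flags added later in this change set, the struct carries an execution timeout for jq-based ignore-difference expressions. A minimal sketch of what such a timeout protects against, assuming the gojq library and a hypothetical runJQ helper (the field name JQExecutionTimeout mirrors the flag, not necessarily Argo CD's internals):

```go
// Sketch: bound a jq expression's execution with a context deadline so a
// pathological jqPathExpression cannot stall reconciliation indefinitely.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/itchyny/gojq"
)

type IgnoreNormalizerOpts struct {
	JQExecutionTimeout time.Duration
}

func (o IgnoreNormalizerOpts) timeout() time.Duration {
	if o.JQExecutionTimeout > 0 {
		return o.JQExecutionTimeout
	}
	return time.Second // assumed fallback for this sketch
}

func runJQ(opts IgnoreNormalizerOpts, expr string, input any) (any, error) {
	query, err := gojq.Parse(expr)
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithTimeout(context.Background(), opts.timeout())
	defer cancel()
	iter := query.RunWithContext(ctx, input) // aborts once the deadline passes
	v, ok := iter.Next()
	if !ok {
		return nil, fmt.Errorf("jq expression %q produced no result", expr)
	}
	if err, isErr := v.(error); isErr {
		return nil, err
	}
	return v, nil
}

func main() {
	out, err := runJQ(IgnoreNormalizerOpts{JQExecutionTimeout: 500 * time.Millisecond},
		".spec.replicas", map[string]any{"spec": map[string]any{"replicas": 3}})
	fmt.Println(out, err)
}
```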
@@ -7405,6 +7405,7 @@
 "properties": {
 "elements": {
 "type": "array",
+"title": "+kubebuilder:validation:Optional",
 "items": {
 "$ref": "#/definitions/v1JSON"
 }
@@ -6,7 +6,6 @@ import (
 "math"
 "time"

-"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
 "github.com/argoproj/pkg/stats"
 "github.com/redis/go-redis/v9"
 log "github.com/sirupsen/logrus"
@@ -20,19 +19,18 @@ import (
 "github.com/argoproj/argo-cd/v2/controller/sharding"
 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
+"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
 "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
+"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
 cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
 appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
 "github.com/argoproj/argo-cd/v2/util/cli"
 "github.com/argoproj/argo-cd/v2/util/db"
 "github.com/argoproj/argo-cd/v2/util/env"
 "github.com/argoproj/argo-cd/v2/util/errors"
 kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
 "github.com/argoproj/argo-cd/v2/util/settings"
 "github.com/argoproj/argo-cd/v2/util/tls"
 "github.com/argoproj/argo-cd/v2/util/trace"
 kubeerrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 const (
@@ -50,6 +48,7 @@ func NewCommand() *cobra.Command {
 clientConfig clientcmd.ClientConfig
 appResyncPeriod int64
 appHardResyncPeriod int64
+appResyncJitter int64
 repoErrorGracePeriod int64
 repoServerAddress string
 repoServerTimeoutSeconds int
@@ -74,6 +73,7 @@ func NewCommand() *cobra.Command {
 shardingAlgorithm string
 enableDynamicClusterDistribution bool
 serverSideDiff bool
+ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
 )
 var command = cobra.Command{
 Use: cliName,
@@ -146,7 +146,8 @@ func NewCommand() *cobra.Command {
 appController.InvalidateProjectsCache()
 }))
 kubectl := kubeutil.NewKubectl()
-clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+errors.CheckError(err)
 appController, err = controller.NewApplicationController(
 namespace,
 settingsMgr,
@@ -157,6 +158,7 @@ func NewCommand() *cobra.Command {
 kubectl,
 resyncDuration,
 hardResyncDuration,
+time.Duration(appResyncJitter)*time.Second,
 time.Duration(selfHealTimeoutSeconds)*time.Second,
 time.Duration(repoErrorGracePeriod)*time.Second,
 metricsPort,
@@ -164,10 +166,12 @@ func NewCommand() *cobra.Command {
 metricsAplicationLabels,
 kubectlParallelismLimit,
 persistResourceHealth,
-clusterFilter,
+clusterSharding,
 applicationNamespaces,
 &workqueueRateLimit,
 serverSideDiff,
+enableDynamicClusterDistribution,
+ignoreNormalizerOpts,
 )
 errors.CheckError(err)
 cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -194,6 +198,7 @@ func NewCommand() *cobra.Command {
 clientConfig = cli.AddKubectlFlagsToCmd(&command)
 command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
 command.Flags().Int64Var(&appHardResyncPeriod, "app-hard-resync", int64(env.ParseDurationFromEnv("ARGOCD_HARD_RECONCILIATION_TIMEOUT", defaultAppHardResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application hard resync.")
+command.Flags().Int64Var(&appResyncJitter, "app-resync-jitter", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_JITTER", 0*time.Second, 0, math.MaxInt64).Seconds()), "Maximum time period in seconds to add as a delay jitter for application resync.")
 command.Flags().Int64Var(&repoErrorGracePeriod, "repo-error-grace-period-seconds", int64(env.ParseDurationFromEnv("ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Grace period in seconds for ignoring consecutive errors while communicating with repo server.")
 command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
 command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
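The new app-resync-jitter flag (ARGOCD_RECONCILIATION_JITTER) spreads periodic resyncs out instead of letting every application fire at the same fixed period. A minimal sketch of the idea, assuming the jitter is applied as a random delay on top of the base period; Argo CD's internal wiring may differ:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextResync returns the base resync period plus a random delay in [0, jitter),
// so applications sharing the same period do not all resync at once.
func nextResync(base, jitter time.Duration) time.Duration {
	if jitter <= 0 {
		return base
	}
	return base + time.Duration(rand.Int63n(int64(jitter)))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(nextResync(3*time.Minute, 30*time.Second))
	}
}
```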
@@ -218,7 +223,7 @@ func NewCommand() *cobra.Command {
 command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
 // global queue rate limit config
 command.Flags().Int64Var(&workqueueRateLimit.BucketSize, "wq-bucket-size", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_SIZE", 500, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket Size, default 500")
-command.Flags().Int64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_QPS", 50, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket QPS, default 50")
+command.Flags().Float64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseFloat64FromEnv("WORKQUEUE_BUCKET_QPS", math.MaxFloat64, 1, math.MaxFloat64), "Set Workqueue Rate Limiter Bucket QPS, default set to MaxFloat64 which disables the bucket limiter")
 // individual item rate limit config
 // when WORKQUEUE_FAILURE_COOLDOWN is 0 per item rate limiting is disabled(default)
 command.Flags().DurationVar(&workqueueRateLimit.FailureCoolDown, "wq-cooldown-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_FAILURE_COOLDOWN_NS", 0, 0, (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)")
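Note the semantics change in wq-bucket-qps: it moves from an int defaulting to 50 to a float64 defaulting to math.MaxFloat64, which effectively turns the global token-bucket limiter off unless explicitly configured. A sketch of the kind of limiter these flags configure, using client-go's workqueue package; the wiring below is illustrative, not Argo CD's exact code:

```go
// Sketch: build the global token-bucket rate limiter described by the
// wq-bucket-size / wq-bucket-qps flags.
package main

import (
	"fmt"
	"math"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

func newBucketLimiter(bucketSize int64, bucketQPS float64) workqueue.RateLimiter {
	// rate.Limit(math.MaxFloat64) admits items effectively without delay,
	// which is why the new MaxFloat64 default disables the bucket limiter.
	return &workqueue.BucketRateLimiter{
		Limiter: rate.NewLimiter(rate.Limit(bucketQPS), int(bucketSize)),
	}
}

func main() {
	limiter := newBucketLimiter(500, math.MaxFloat64)
	fmt.Println("delay for next item:", limiter.When("some-app"))
}
```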
@@ -227,63 +232,9 @@ func NewCommand() *cobra.Command {
 command.Flags().Float64Var(&workqueueRateLimit.BackoffFactor, "wq-backoff-factor", env.ParseFloat64FromEnv("WORKQUEUE_BACKOFF_FACTOR", 1.5, 0, math.MaxFloat64), "Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5")
 command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
 command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
+command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
 cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
 redisClient = client
 })
 return &command
 }

-func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
-
-var replicas int
-shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
-
-applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
-appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
-
-// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
-if err != nil && kubeerrors.IsNotFound(err) {
-appControllerDeployment = nil
-}
-
-if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
-replicas = int(*appControllerDeployment.Spec.Replicas)
-} else {
-replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
-}
-
-var clusterFilter func(cluster *v1alpha1.Cluster) bool
-if replicas > 1 {
-// check for shard mapping using configmap if application-controller is a deployment
-// else use existing logic to infer shard from pod name if application-controller is a statefulset
-if enableDynamicClusterDistribution && appControllerDeployment != nil {
-
-var err error
-// retry 3 times if we find a conflict while updating shard mapping configMap.
-// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
-for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
-shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
-if !kubeerrors.IsConflict(err) {
-err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
-break
-}
-log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
-}
-errors.CheckError(err)
-} else {
-if shard < 0 {
-var err error
-shard, err = sharding.InferShard()
-errors.CheckError(err)
-}
-}
-log.Infof("Processing clusters from shard %d", shard)
-db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
-log.Infof("Using filter function: %s", shardingAlgorithm)
-distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
-clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
-} else {
-log.Info("Processing all cluster shards")
-}
-return clusterFilter
-}
@@ -68,6 +68,7 @@ func NewCommand() *cobra.Command {
 streamedManifestMaxTarSize string
 streamedManifestMaxExtractedSize string
 helmManifestMaxExtractedSize string
+helmRegistryMaxIndexSize string
 disableManifestMaxExtractedSize bool
 )
 var command = cobra.Command{
@@ -110,6 +111,9 @@ func NewCommand() *cobra.Command {
 helmManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(helmManifestMaxExtractedSize)
 errors.CheckError(err)

+helmRegistryMaxIndexSizeQuantity, err := resource.ParseQuantity(helmRegistryMaxIndexSize)
+errors.CheckError(err)
+
 askPassServer := askpass.NewServer()
 metricsServer := metrics.NewMetricsServer()
 cacheutil.CollectMetrics(redisClient, metricsServer)
@@ -125,6 +129,7 @@ func NewCommand() *cobra.Command {
 StreamedManifestMaxExtractedSize: streamedManifestMaxExtractedSizeQuantity.ToDec().Value(),
 StreamedManifestMaxTarSize: streamedManifestMaxTarSizeQuantity.ToDec().Value(),
 HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(),
+HelmRegistryMaxIndexSize: helmRegistryMaxIndexSizeQuantity.ToDec().Value(),
 }, askPassServer)
 errors.CheckError(err)

@@ -208,6 +213,7 @@ func NewCommand() *cobra.Command {
 command.Flags().StringVar(&streamedManifestMaxTarSize, "streamed-manifest-max-tar-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE", "100M"), "Maximum size of streamed manifest archives")
 command.Flags().StringVar(&streamedManifestMaxExtractedSize, "streamed-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of streamed manifest archives when extracted")
 command.Flags().StringVar(&helmManifestMaxExtractedSize, "helm-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of helm manifest archives when extracted")
+command.Flags().StringVar(&helmRegistryMaxIndexSize, "helm-registry-max-index-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_INDEX_SIZE", "1G"), "Maximum size of registry index file")
 command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
 tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
 cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
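The new helm-registry-max-index-size limit is parsed the same way as the neighboring size flags: a Kubernetes resource.Quantity string such as "1G" becomes a byte budget that can be enforced against a registry index download. A small self-contained illustration:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// maxIndexBytes converts a human-readable size limit ("100M", "1G", "1Gi")
// into a byte count, exactly as the flag values above are converted.
func maxIndexBytes(limit string) (int64, error) {
	q, err := resource.ParseQuantity(limit)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %w", limit, err)
	}
	return q.ToDec().Value(), nil
}

func main() {
	n, err := maxIndexBytes("1G")
	fmt.Println(n, err) // 1000000000 <nil>; note decimal "G" vs binary "Gi"
}
```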
@@ -4,6 +4,7 @@ import (
 "context"
 "fmt"
 "math"
+"strings"
 "time"

 "github.com/argoproj/pkg/stats"
@@ -61,6 +62,7 @@ func NewCommand() *cobra.Command {
 repoServerAddress string
 dexServerAddress string
 disableAuth bool
+contentTypes string
 enableGZip bool
 tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
 cacheSrc func() (*servercache.Cache, error)
@@ -165,6 +167,11 @@ func NewCommand() *cobra.Command {
 baseHRef = rootPath
 }

+var contentTypesList []string
+if contentTypes != "" {
+contentTypesList = strings.Split(contentTypes, ";")
+}
+
 argoCDOpts := server.ArgoCDServerOpts{
 Insecure: insecure,
 ListenPort: listenPort,
@@ -180,6 +187,7 @@ func NewCommand() *cobra.Command {
 DexServerAddr: dexServerAddress,
 DexTLSConfig: dexTlsConfig,
 DisableAuth: disableAuth,
+ContentTypes: contentTypesList,
 EnableGZip: enableGZip,
 TLSConfigCustomizer: tlsConfigCustomizer,
 Cache: cache,
@@ -234,6 +242,7 @@ func NewCommand() *cobra.Command {
 command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
 command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
 command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
+command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", env.StringFromEnvOpts{AllowEmpty: true}), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
 command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
 command.AddCommand(cli.NewVersionCmd(cliName))
 command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
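api-content-types is a semicolon-separated allow-list applied to non-GET API requests, with an empty value allowing everything. A sketch of how such a check can be enforced as HTTP middleware; the handler below is an illustration, not Argo CD's actual server code:

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
	"strings"
)

// enforceContentTypes rejects non-GET requests whose Content-Type is not in
// the allow-list. An empty list, like an empty flag value, allows everything.
func enforceContentTypes(next http.Handler, allowed []string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if len(allowed) == 0 || r.Method == http.MethodGet {
			next.ServeHTTP(w, r)
			return
		}
		mediaType, _, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
		if err == nil {
			for _, ct := range allowed {
				if mediaType == ct {
					next.ServeHTTP(w, r)
					return
				}
			}
		}
		http.Error(w, "unsupported content type", http.StatusUnsupportedMediaType)
	})
}

func main() {
	allowed := strings.Split("application/json", ";")
	_ = enforceContentTypes(http.NotFoundHandler(), allowed)
	fmt.Println("allow-list:", allowed)
}
```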
@@ -141,6 +141,7 @@ $ argocd admin initial-password reset
 command.AddCommand(NewDashboardCommand(clientOpts))
 command.AddCommand(NewNotificationsCommand())
 command.AddCommand(NewInitialPasswordCommand())
+command.AddCommand(NewRedisInitialPasswordCommand())

 command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
 command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
@@ -30,6 +30,7 @@ import (
 appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
 reposerverclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
 "github.com/argoproj/argo-cd/v2/util/argo"
+"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
 cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
 appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
 "github.com/argoproj/argo-cd/v2/util/cli"
@@ -238,12 +239,13 @@ func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error {

 func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 var (
-clientConfig clientcmd.ClientConfig
-selector string
-repoServerAddress string
-outputFormat string
-refresh bool
-serverSideDiff bool
+clientConfig clientcmd.ClientConfig
+selector string
+repoServerAddress string
+outputFormat string
+refresh bool
+serverSideDiff bool
+ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
 )

 var command = &cobra.Command{
@@ -281,7 +283,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command

 appClientset := appclientset.NewForConfigOrDie(cfg)
 kubeClientset := kubernetes.NewForConfigOrDie(cfg)
-result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache, serverSideDiff)
+result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache, serverSideDiff, ignoreNormalizerOpts)
 errors.CheckError(err)
 } else {
 appClientset := appclientset.NewForConfigOrDie(cfg)
@@ -297,7 +299,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 command.Flags().StringVar(&outputFormat, "o", "yaml", "Output format (yaml|json)")
 command.Flags().BoolVar(&refresh, "refresh", false, "If set to true then recalculates apps reconciliation")
 command.Flags().BoolVar(&serverSideDiff, "server-side-diff", false, "If set to \"true\" will use server-side diff while comparing resources. Default (\"false\")")
+command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
 return command
 }
@@ -347,6 +349,7 @@ func reconcileApplications(
 selector string,
 createLiveStateCache func(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache,
 serverSideDiff bool,
+ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
 ) ([]appReconcileResult, error) {
 settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, namespace)
 argoDB := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -387,7 +390,7 @@ func reconcileApplications(
 )

 appStateManager := controller.NewAppStateManager(
-argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false, 0, serverSideDiff)
+argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false, 0, serverSideDiff, ignoreNormalizerOpts)

 appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
 if err != nil {
@@ -23,6 +23,7 @@ import (
 argocdclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
 "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
 "github.com/argoproj/argo-cd/v2/test"
+"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
 "github.com/argoproj/argo-cd/v2/util/db"
 "github.com/argoproj/argo-cd/v2/util/settings"
 )
@@ -114,6 +115,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {
 return &liveStateCache
 },
 false,
+normalizers.IgnoreNormalizerOpts{},
 )

 if !assert.NoError(t, err) {
@@ -25,6 +25,7 @@ import (
 "github.com/argoproj/argo-cd/v2/common"
 "github.com/argoproj/argo-cd/v2/controller/sharding"
 argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
+"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
 "github.com/argoproj/argo-cd/v2/util/argo"
@@ -78,7 +79,7 @@ type ClusterWithInfo struct {
 Namespaces []string
 }

-func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
+func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, shardingAlgorithm string, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
 settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)

 argoDB := db.NewDB(namespace, settingsMgr, kubeClient)
@@ -86,6 +87,10 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
 if err != nil {
 return nil, err
 }
+clusterShardingCache := sharding.NewClusterSharding(argoDB, shard, replicas, shardingAlgorithm)
+clusterShardingCache.Init(clustersList)
+clusterShards := clusterShardingCache.GetDistribution()
+
 var cache *appstatecache.Cache
 if portForwardRedis {
 overrides := clientcmd.ConfigOverrides{}
@@ -96,7 +101,17 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
 if err != nil {
 return nil, err
 }
-client := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", port)})
+
+redisOptions := &redis.Options{Addr: fmt.Sprintf("localhost:%d", port)}
+
+secret, err := kubeClient.CoreV1().Secrets(namespace).Get(context.Background(), defaulRedisInitialPasswordSecretName, v1.GetOptions{})
+if err == nil {
+if _, ok := secret.Data[defaultResisInitialPasswordKey]; ok {
+redisOptions.Password = string(secret.Data[defaultResisInitialPasswordKey])
+}
+}
+
+client := redis.NewClient(redisOptions)
 compressionType, err := cacheutil.CompressionTypeFromString(redisCompressionStr)
 if err != nil {
 return nil, err
@@ -122,8 +137,15 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
 apps[i] = app
 }
 clusters := make([]ClusterWithInfo, len(clustersList.Items))

 batchSize := 10
 batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
+clusterSharding := &sharding.ClusterSharding{
+Shard: shard,
+Replicas: replicas,
+Shards: make(map[string]int),
+Clusters: make(map[string]*v1alpha1.Cluster),
+}
 for batchNum := 0; batchNum < batchesCount; batchNum++ {
 batchStart := batchSize * batchNum
 batchEnd := batchSize * (batchNum + 1)
@@ -135,12 +157,12 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
 clusterShard := 0
 cluster := batch[i]
 if replicas > 0 {
-distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
+distributionFunction := sharding.GetDistributionFunction(clusterSharding.GetClusterAccessor(), common.DefaultShardingAlgorithm, replicas)
 distributionFunction(&cluster)
+clusterShard := clusterShards[cluster.Server]
 cluster.Shard = pointer.Int64(int64(clusterShard))
 log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
 }

 if shard != -1 && clusterShard != shard {
 return nil
 }
@@ -176,6 +198,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 var (
 shard int
 replicas int
+shardingAlgorithm string
 clientConfig clientcmd.ClientConfig
 cacheSrc func() (*appstatecache.Cache, error)
 portForwardRedis bool
@@ -183,7 +206,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 )
 var command = cobra.Command{
 Use: "shards",
-Short: "Print information about each controller shard and portion of Kubernetes resources it is responsible for.",
+Short: "Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.",
 Run: func(cmd *cobra.Command, args []string) {
 ctx := cmd.Context()

@@ -203,8 +226,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 if replicas == 0 {
 return
 }
-
-clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
+clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
 errors.CheckError(err)
 if len(clusters) == 0 {
 return
@@ -216,7 +238,9 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 clientConfig = cli.AddKubectlFlagsToCmd(&command)
 command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
 command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
+command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] ")
 command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
+
 cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)

 // parse all added flags so far to get the redis-compression flag that was added by AddCacheFlagsToCmd() above
@@ -461,6 +485,7 @@ func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
 var (
 shard int
 replicas int
+shardingAlgorithm string
 clientConfig clientcmd.ClientConfig
 cacheSrc func() (*appstatecache.Cache, error)
 portForwardRedis bool
@@ -494,7 +519,7 @@ argocd admin cluster stats target-cluster`,
 replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
 errors.CheckError(err)
 }
-clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
+clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
 errors.CheckError(err)

 w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -508,6 +533,7 @@ argocd admin cluster stats target-cluster`,
 clientConfig = cli.AddKubectlFlagsToCmd(&command)
 command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
 command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
+command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] ")
 command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
 cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
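Both admin cluster commands gain a --sharding-method flag so the printed shard assignment can match the controller's configured method. As a rough illustration of the two methods named here, legacy hashes a cluster identifier into a shard while round-robin deals clusters out by position; neither sketch below is Argo CD's implementation:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// legacyShard: hash a stable cluster identifier into one of `replicas` shards.
func legacyShard(clusterID string, replicas int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(clusterID))
	return int(h.Sum32() % uint32(replicas))
}

// roundRobinShards: assign shards by iteration order, which balances counts
// evenly regardless of how identifiers hash.
func roundRobinShards(clusterIDs []string, replicas int) map[string]int {
	shards := make(map[string]int, len(clusterIDs))
	for i, id := range clusterIDs {
		shards[id] = i % replicas
	}
	return shards
}

func main() {
	ids := []string{"in-cluster", "prod-east", "prod-west"}
	fmt.Println(roundRobinShards(ids, 2))
	fmt.Println(legacyShard("in-cluster", 2))
}
```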
98 cmd/argocd/commands/admin/redis_initial_password.go (new file)

@@ -0,0 +1,98 @@
+package admin
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"math/big"
+
+	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+	"github.com/argoproj/argo-cd/v2/util/cli"
+	apierr "k8s.io/apimachinery/pkg/api/errors"
+
+	"github.com/argoproj/argo-cd/v2/util/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+
+	"github.com/spf13/cobra"
+	corev1 "k8s.io/api/core/v1"
+)
+
+const defaulRedisInitialPasswordSecretName = "argocd-redis"
+const defaultResisInitialPasswordKey = "auth"
+
+func generateRandomPassword() (string, error) {
+	const initialPasswordLength = 16
+	const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
+	randBytes := make([]byte, initialPasswordLength)
+	for i := 0; i < initialPasswordLength; i++ {
+		num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
+		if err != nil {
+			return "", err
+		}
+		randBytes[i] = letters[num.Int64()]
+	}
+	initialPassword := string(randBytes)
+	return initialPassword, nil
+}
+
+// NewRedisInitialPasswordCommand defines a new command to ensure Argo CD Redis password secret exists.
+func NewRedisInitialPasswordCommand() *cobra.Command {
+	var (
+		clientConfig clientcmd.ClientConfig
+	)
+	var command = cobra.Command{
+		Use:   "redis-initial-password",
+		Short: "Ensure the Redis password exists, creating a new one if necessary.",
+		Run: func(c *cobra.Command, args []string) {
+			namespace, _, err := clientConfig.Namespace()
+			errors.CheckError(err)
+
+			redisInitialPasswordSecretName := defaulRedisInitialPasswordSecretName
+			redisInitialPasswordKey := defaultResisInitialPasswordKey
+			fmt.Printf("Checking for initial Redis password in secret %s/%s at key %s. \n", namespace, redisInitialPasswordSecretName, redisInitialPasswordKey)
+
+			config, err := clientConfig.ClientConfig()
+			errors.CheckError(err)
+			errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
+
+			kubeClientset := kubernetes.NewForConfigOrDie(config)
+
+			randomPassword, err := generateRandomPassword()
+			errors.CheckError(err)
+
+			data := map[string][]byte{
+				redisInitialPasswordKey: []byte(randomPassword),
+			}
+			secret := &corev1.Secret{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      redisInitialPasswordSecretName,
+					Namespace: namespace,
+				},
+				Data: data,
+				Type: corev1.SecretTypeOpaque,
+			}
+			_, err = kubeClientset.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
+			if err != nil && !apierr.IsAlreadyExists(err) {
+				errors.CheckError(err)
+			}
+
+			fmt.Println("Argo CD Redis secret state confirmed: secret name argocd-redis.")
+			secret, err = kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), redisInitialPasswordSecretName, v1.GetOptions{})
+			errors.CheckError(err)
+
+			if _, ok := secret.Data[redisInitialPasswordKey]; ok {
+				fmt.Println("Password secret is configured properly.")
+			} else {
+				err := fmt.Errorf("key %s doesn't exist in secret %s. \n", redisInitialPasswordKey, redisInitialPasswordSecretName)
+				errors.CheckError(err)
+			}
+		},
+	}
+
+	clientConfig = cli.AddKubectlFlagsToCmd(&command)
+
+	return &command
+}
@@ -428,7 +428,7 @@ argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argo
|
||||
// configurations. This requires access to live resources which is not the
|
||||
// purpose of this command. This will just apply jsonPointers and
|
||||
// jqPathExpressions configurations.
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides)
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides, normalizers.IgnoreNormalizerOpts{})
|
||||
errors.CheckError(err)
|
||||
|
||||
normalizedRes := res.DeepCopy()
|
||||
@@ -453,6 +453,9 @@ argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argo
|
||||
}
|
||||
|
||||
func NewResourceIgnoreResourceUpdatesCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var (
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "ignore-resource-updates RESOURCE_YAML_PATH",
|
||||
Short: "Renders fields excluded from resource updates",
|
||||
@@ -474,7 +477,7 @@ argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml -
|
||||
return
|
||||
}
|
||||
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides)
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides, ignoreNormalizerOpts)
|
||||
errors.CheckError(err)
|
||||
|
||||
normalizedRes := res.DeepCopy()
|
||||
@@ -495,6 +498,7 @@ argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml -
|
||||
})
|
||||
},
|
||||
}
|
||||
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
|
||||
return command
|
||||
}
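Both hunks above thread the same new knob through NewIgnoreNormalizer. A minimal sketch of the resulting call pattern, as a fragment rather than a definitive API reference; the thirty-second value is purely illustrative, and DefaultJQExecutionTimeout is what the flag falls back to:

    // Build an ignore-differences normalizer with a bounded JQ execution time.
    opts := normalizers.IgnoreNormalizerOpts{JQExecutionTimeout: 30 * time.Second}
    normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides, opts)
    if err != nil {
        return err
    }
    // normalizer is then applied to a deep copy of the resource, as in the
    // normalizedRes := res.DeepCopy() lines above.

On the CLI side, the same value arrives via the --ignore-normalizer-jq-execution-timeout flag registered on each command touched by this diff.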
@@ -44,6 +44,7 @@ import (
     "github.com/argoproj/argo-cd/v2/reposerver/repository"
     "github.com/argoproj/argo-cd/v2/util/argo"
     argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
+    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
     "github.com/argoproj/argo-cd/v2/util/cli"
     "github.com/argoproj/argo-cd/v2/util/errors"
     "github.com/argoproj/argo-cd/v2/util/git"
@@ -1049,14 +1050,15 @@ type objKeyLiveTarget struct {
 // NewApplicationDiffCommand returns a new instance of an `argocd app diff` command
 func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
     var (
-        refresh            bool
-        hardRefresh        bool
-        exitCode           bool
-        local              string
-        revision           string
-        localRepoRoot      string
-        serverSideGenerate bool
-        localIncludes      []string
+        refresh              bool
+        hardRefresh          bool
+        exitCode             bool
+        local                string
+        revision             string
+        localRepoRoot        string
+        serverSideGenerate   bool
+        localIncludes        []string
+        ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
     )
     shortDesc := "Perform a diff against the target and live state."
     var command = &cobra.Command{
@@ -1116,13 +1118,14 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
                 defer argoio.Close(conn)
                 cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
                 errors.CheckError(err)

                 diffOption.local = local
                 diffOption.localRepoRoot = localRepoRoot
                 diffOption.cluster = cluster
             }
         }
         proj := getProject(c, clientOpts, ctx, app.Spec.Project)
-        foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption)
+        foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
         if foundDiffs && exitCode {
             os.Exit(1)
         }
@@ -1136,6 +1139,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
     command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
     command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing")
     command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.")
+    command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
     return command
 }
@@ -1150,7 +1154,7 @@ type DifferenceOption struct {
 }

 // findandPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
-func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption) bool {
+func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) bool {
     var foundDiffs bool
     liveObjs, err := cmdutil.LiveObjects(resources.Items)
     errors.CheckError(err)
@@ -1205,7 +1209,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
     // compareOptions in the protobuf
     ignoreAggregatedRoles := false
     diffConfig, err := argodiff.NewDiffConfigBuilder().
-        WithDiffSettings(app.Spec.IgnoreDifferences, overrides, ignoreAggregatedRoles).
+        WithDiffSettings(app.Spec.IgnoreDifferences, overrides, ignoreAggregatedRoles, ignoreNormalizerOpts).
         WithTracking(argoSettings.AppLabelKey, argoSettings.TrackingMethod).
         WithNoCache().
         Build()
@@ -1698,6 +1702,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
         diffChangesConfirm   bool
         projects             []string
         output               string
+        ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
     )
     var command = &cobra.Command{
         Use: "sync [APPNAME... | -l selector | --project project-name]",
@@ -1922,7 +1927,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
                 fmt.Printf("====== Previewing differences between live and desired state of application %s ======\n", appQualifiedName)

                 proj := getProject(c, clientOpts, ctx, app.Spec.Project)
-                foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption)
+                foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
                 if foundDiffs {
                     if !diffChangesConfirm {
                         yesno := cli.AskToProceed(fmt.Sprintf("Please review changes to application %s shown above. Do you want to continue the sync process? (y/n): ", appQualifiedName))
@@ -1980,6 +1985,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
     command.Flags().BoolVar(&diffChanges, "preview-changes", false, "Preview difference against the target and live state before syncing app and wait for user confirmation")
     command.Flags().StringArrayVar(&projects, "project", []string{}, "Sync apps that belong to the specified projects. This option may be specified repeatedly.")
     command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
+    command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
     return command
 }
@@ -46,13 +46,13 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e

     serverLog := log.NewEntry(log.StandardLogger())
     streamInterceptors := []grpc.StreamServerInterceptor{
-        otelgrpc.StreamServerInterceptor(),
+        otelgrpc.StreamServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
         grpc_logrus.StreamServerInterceptor(serverLog),
         grpc_prometheus.StreamServerInterceptor,
         grpc_util.PanicLoggerStreamServerInterceptor(serverLog),
     }
     unaryInterceptors := []grpc.UnaryServerInterceptor{
-        otelgrpc.UnaryServerInterceptor(),
+        otelgrpc.UnaryServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
         grpc_logrus.UnaryServerInterceptor(serverLog),
         grpc_prometheus.UnaryServerInterceptor,
         grpc_util.PanicLoggerUnaryServerInterceptor(serverLog),

@@ -115,9 +115,9 @@ const (
     LegacyShardingAlgorithm = "legacy"
     // RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution accross all shards
     RoundRobinShardingAlgorithm = "round-robin"
-    DefaultShardingAlgorithm = LegacyShardingAlgorithm
     // AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
     AppControllerHeartbeatUpdateRetryCount = 3
+    DefaultShardingAlgorithm = LegacyShardingAlgorithm
 )

 // Dex related constants
@@ -6,6 +6,7 @@ import (
     goerrors "errors"
     "fmt"
     "math"
+    "math/rand"
     "net/http"
     "reflect"
     "runtime/debug"
@@ -54,6 +55,7 @@ import (
     "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
     "github.com/argoproj/argo-cd/v2/util/argo"
     argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
+    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
    "github.com/argoproj/argo-cd/v2/util/env"

     kubeerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -113,11 +115,11 @@ type ApplicationController struct {
     appInformer  cache.SharedIndexInformer
     appLister    applisters.ApplicationLister
     projInformer cache.SharedIndexInformer
-    deploymentInformer informerv1.DeploymentInformer
     appStateManager          AppStateManager
     stateCache               statecache.LiveStateCache
     statusRefreshTimeout     time.Duration
     statusHardRefreshTimeout time.Duration
+    statusRefreshJitter      time.Duration
     selfHealTimeout time.Duration
     repoClientset   apiclient.Clientset
     db              db.ArgoDB
@@ -126,9 +128,14 @@ type ApplicationController struct {
     refreshRequestedAppsMutex *sync.Mutex
     metricsServer             *metrics.MetricsServer
     kubectlSemaphore          *semaphore.Weighted
-    clusterFilter             func(cluster *appv1.Cluster) bool
+    clusterSharding           sharding.ClusterShardingCache
     projByNameCache           sync.Map
     applicationNamespaces     []string
+    ignoreNormalizerOpts      normalizers.IgnoreNormalizerOpts
+
+    // dynamicClusterDistributionEnabled if disabled deploymentInformer is never initialized
+    dynamicClusterDistributionEnabled bool
+    deploymentInformer                informerv1.DeploymentInformer
 }

 // NewApplicationController creates new instance of ApplicationController.
@@ -142,6 +149,7 @@ func NewApplicationController(
     kubectl kube.Kubectl,
     appResyncPeriod time.Duration,
     appHardResyncPeriod time.Duration,
+    appResyncJitter time.Duration,
     selfHealTimeout time.Duration,
     repoErrorGracePeriod time.Duration,
     metricsPort int,
@@ -149,39 +157,44 @@ func NewApplicationController(
     metricsApplicationLabels []string,
     kubectlParallelismLimit int64,
     persistResourceHealth bool,
-    clusterFilter func(cluster *appv1.Cluster) bool,
+    clusterSharding sharding.ClusterShardingCache,
     applicationNamespaces []string,
     rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
     serverSideDiff bool,
+    dynamicClusterDistributionEnabled bool,
+    ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
 ) (*ApplicationController, error) {
-    log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod)
+    log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
     db := db.NewDB(namespace, settingsMgr, kubeClientset)
     if rateLimiterConfig == nil {
         rateLimiterConfig = ratelimiter.GetDefaultAppRateLimiterConfig()
         log.Info("Using default workqueue rate limiter config")
     }
     ctrl := ApplicationController{
-        cache:                         argoCache,
-        namespace:                     namespace,
-        kubeClientset:                 kubeClientset,
-        kubectl:                       kubectl,
-        applicationClientset:          applicationClientset,
-        repoClientset:                 repoClientset,
-        appRefreshQueue:               workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
-        appOperationQueue:             workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
-        projectRefreshQueue:           workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
-        appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
-        db:                            db,
-        statusRefreshTimeout:          appResyncPeriod,
-        statusHardRefreshTimeout:      appHardResyncPeriod,
-        refreshRequestedApps:          make(map[string]CompareWith),
-        refreshRequestedAppsMutex:     &sync.Mutex{},
-        auditLogger:                   argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
-        settingsMgr:                   settingsMgr,
-        selfHealTimeout:               selfHealTimeout,
-        clusterFilter:                 clusterFilter,
-        projByNameCache:               sync.Map{},
-        applicationNamespaces:         applicationNamespaces,
+        cache:                         argoCache,
+        namespace:                     namespace,
+        kubeClientset:                 kubeClientset,
+        kubectl:                       kubectl,
+        applicationClientset:          applicationClientset,
+        repoClientset:                 repoClientset,
+        appRefreshQueue:               workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
+        appOperationQueue:             workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
+        projectRefreshQueue:           workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
+        appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
+        db:                            db,
+        statusRefreshTimeout:          appResyncPeriod,
+        statusHardRefreshTimeout:      appHardResyncPeriod,
+        statusRefreshJitter:           appResyncJitter,
+        refreshRequestedApps:          make(map[string]CompareWith),
+        refreshRequestedAppsMutex:     &sync.Mutex{},
+        auditLogger:                   argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
+        settingsMgr:                   settingsMgr,
+        selfHealTimeout:               selfHealTimeout,
+        clusterSharding:               clusterSharding,
+        projByNameCache:               sync.Map{},
+        applicationNamespaces:         applicationNamespaces,
+        dynamicClusterDistributionEnabled: dynamicClusterDistributionEnabled,
+        ignoreNormalizerOpts:              ignoreNormalizerOpts,
     }
     if kubectlParallelismLimit > 0 {
         ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
@@ -224,25 +237,33 @@ func NewApplicationController(
     }

     factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
-    deploymentInformer := factory.Apps().V1().Deployments()
+
+    var deploymentInformer informerv1.DeploymentInformer
+
+    // only initialize deployment informer if dynamic distribution is enabled
+    if dynamicClusterDistributionEnabled {
+        deploymentInformer = factory.Apps().V1().Deployments()
+    }

     readinessHealthCheck := func(r *http.Request) error {
-        applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
-        appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
-        if err != nil {
-            if kubeerrors.IsNotFound(err) {
-                appControllerDeployment = nil
-            } else {
-                return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
+        if dynamicClusterDistributionEnabled {
+            applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
+            appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
+            if err != nil {
+                if kubeerrors.IsNotFound(err) {
+                    appControllerDeployment = nil
+                } else {
+                    return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
+                }
             }
-        }
-        if appControllerDeployment != nil {
-            if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
-                return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
-            }
-            shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
-            if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
-                return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
+            if appControllerDeployment != nil {
+                if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
+                    return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
+                }
+                shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
+                if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
+                    return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
+                }
             }
         }
         return nil
@@ -260,8 +281,8 @@ func NewApplicationController(
             return nil, err
         }
     }
-    stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking())
-    appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff)
+    stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
+    appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
     ctrl.appInformer = appInformer
     ctrl.appLister = appLister
     ctrl.projInformer = projInformer
@@ -712,7 +733,7 @@ func (ctrl *ApplicationController) hideSecretData(app *appv1.Application, compar
         return nil, fmt.Errorf("error getting cluster cache: %s", err)
     }
     diffConfig, err := argodiff.NewDiffConfigBuilder().
-        WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
+        WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles, ctrl.ignoreNormalizerOpts).
         WithTracking(appLabelKey, trackingMethod).
         WithNoCache().
         WithLogger(logutils.NewLogrusLogger(logutils.NewWithCurrentConfig())).
@@ -770,7 +791,18 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int

     go ctrl.appInformer.Run(ctx.Done())
     go ctrl.projInformer.Run(ctx.Done())
-    go ctrl.deploymentInformer.Informer().Run(ctx.Done())
+
+    if ctrl.dynamicClusterDistributionEnabled {
+        // only start deployment informer if dynamic distribution is enabled
+        go ctrl.deploymentInformer.Informer().Run(ctx.Done())
+    }
+
+    clusters, err := ctrl.db.ListClusters(ctx)
+    if err != nil {
+        log.Warnf("Cannot init sharding. Error while querying clusters list from database: %v", err)
+    } else {
+        ctrl.clusterSharding.Init(clusters)
+    }

     errors.CheckError(ctrl.stateCache.Init())
@@ -1636,6 +1668,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
     var reason string
     compareWith := CompareWithLatest
     refreshType := appv1.RefreshTypeNormal

     softExpired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
     hardExpired := (app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusHardRefreshTimeout).Before(time.Now().UTC())) && statusHardRefreshTimeout.Seconds() != 0

@@ -1725,6 +1758,22 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica
     }
 }

+func createMergePatch(orig, new interface{}) ([]byte, bool, error) {
+    origBytes, err := json.Marshal(orig)
+    if err != nil {
+        return nil, false, err
+    }
+    newBytes, err := json.Marshal(new)
+    if err != nil {
+        return nil, false, err
+    }
+    patch, err := jsonpatch.CreateMergePatch(origBytes, newBytes)
+    if err != nil {
+        return nil, false, err
+    }
+    return patch, string(patch) != "{}", nil
+}
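The switch from diff.CreateTwoWayMergePatch to this helper matters because RFC 7386 JSON merge patches replace arrays and removed fields wholesale instead of merging them strategically, which is exactly what the new tests further down (TestHelmValuesObjectHasReplaceStrategy, TestAppStatusIsReplaced) assert. A small self-contained sketch of that behavior, assuming the jsonpatch alias above refers to the evanphx/json-patch package:

    package main

    import (
        "fmt"

        jsonpatch "github.com/evanphx/json-patch"
    )

    func main() {
        orig := []byte(`{"server":"https://old","values":{"key":["a","b"]}}`)
        modified := []byte(`{"values":{"key":["c"]}}`)

        patch, err := jsonpatch.CreateMergePatch(orig, modified)
        if err != nil {
            panic(err)
        }
        // The array is replaced rather than merged, and the dropped field
        // becomes an explicit null: {"server":null,"values":{"key":["c"]}}
        fmt.Println(string(patch))
    }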

 // persistAppStatus persists updates to application status. If no changes were made, it is a no-op
 func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) (patchMs time.Duration) {
     logCtx := log.WithFields(log.Fields{"application": orig.QualifiedName()})
@@ -1744,9 +1793,9 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
         }
         delete(newAnnotations, appv1.AnnotationKeyRefresh)
     }
-    patch, modified, err := diff.CreateTwoWayMergePatch(
+    patch, modified, err := createMergePatch(
         &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status},
-        &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: newAnnotations}, Status: *newStatus}, appv1.Application{})
+        &appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: newAnnotations}, Status: *newStatus})
     if err != nil {
         logCtx.Errorf("Error constructing app status patch: %v", err)
         return
@@ -1976,15 +2025,11 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
         }
     }

-    if ctrl.clusterFilter != nil {
-        cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
-        if err != nil {
-            return ctrl.clusterFilter(nil)
-        }
-        return ctrl.clusterFilter(cluster)
+    cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
+    if err != nil {
+        return ctrl.clusterSharding.IsManagedCluster(nil)
     }
-
-    return true
+    return ctrl.clusterSharding.IsManagedCluster(cluster)
 }

 func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {
@@ -2092,14 +2137,25 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
             if err != nil {
                 return
             }

             var compareWith *CompareWith
+            var delay *time.Duration
+
             oldApp, oldOK := old.(*appv1.Application)
             newApp, newOK := new.(*appv1.Application)
-            if oldOK && newOK && automatedSyncEnabled(oldApp, newApp) {
-                log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync")
-                compareWith = CompareWithLatest.Pointer()
+            if oldOK && newOK {
+                if automatedSyncEnabled(oldApp, newApp) {
+                    log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync")
+                    compareWith = CompareWithLatest.Pointer()
+                }
+                if ctrl.statusRefreshJitter != 0 && oldApp.ResourceVersion == newApp.ResourceVersion {
+                    // Handler is refreshing the apps, add a random jitter to spread the load and avoid spikes
+                    jitter := time.Duration(float64(ctrl.statusRefreshJitter) * rand.Float64())
+                    delay = &jitter
+                }
             }
-            ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, nil)
+
+            ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, delay)
             ctrl.appOperationQueue.AddRateLimited(key)
         },
         DeleteFunc: func(obj interface{}) {
@@ -2136,7 +2192,7 @@ func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1
 }

 func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
-    updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace)
+    updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterSharding.IsManagedCluster, ctrl.getAppProj, ctrl.namespace)
     go updater.Run(ctx)
 }
@@ -17,7 +17,9 @@ import (

     "github.com/argoproj/argo-cd/v2/common"
     statecache "github.com/argoproj/argo-cd/v2/controller/cache"
+    "github.com/argoproj/argo-cd/v2/controller/sharding"
+
     dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
     "github.com/argoproj/gitops-engine/pkg/cache/mocks"
     synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
     "github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -40,6 +42,7 @@ import (
     "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
     mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
     "github.com/argoproj/argo-cd/v2/test"
+    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
     cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
     appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
     "github.com/argoproj/argo-cd/v2/util/settings"
@@ -142,6 +145,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
         kubectl,
         time.Minute,
         time.Hour,
+        time.Second,
         time.Minute,
         time.Second*10,
         common.DefaultPortArgoCDMetrics,
@@ -153,7 +157,13 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
         data.applicationNamespaces,
         nil,
         false,
+        false,
+        normalizers.IgnoreNormalizerOpts{},
     )
+    db := &dbmocks.ArgoDB{}
+    db.On("GetApplicationControllerReplicas").Return(1)
+    // Setting a default sharding algorithm for the tests where we cannot set it.
+    ctrl.clusterSharding = sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm)
     if err != nil {
         panic(err)
     }
@@ -686,7 +696,6 @@ func TestFinalizeAppDeletion(t *testing.T) {
         ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
             kube.GetResourceKey(appObj): appObj,
         }}, nil)
-
         patched := false
         fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
         defaultReactor := fakeAppCs.ReactionChain[0]
@@ -999,7 +1008,7 @@ func TestNormalizeApplication(t *testing.T) {
         normalized := false
         fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
             if patchAction, ok := action.(kubetesting.PatchAction); ok {
-                if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
+                if string(patchAction.GetPatch()) == `{"spec":{"project":"default"},"status":{"sync":{"comparedTo":{"destination":{},"source":{"repoURL":""}}}}}` {
                     normalized = true
                 }
             }
@@ -1809,13 +1818,11 @@ func Test_canProcessApp(t *testing.T) {
     })
     t.Run("with cluster filter, good namespace", func(t *testing.T) {
         app.Namespace = "good"
-        ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
         canProcess := ctrl.canProcessApp(app)
         assert.True(t, canProcess)
     })
     t.Run("with cluster filter, bad namespace", func(t *testing.T) {
         app.Namespace = "bad"
-        ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
         canProcess := ctrl.canProcessApp(app)
         assert.False(t, canProcess)
     })
@@ -1904,3 +1911,65 @@ func TestAddControllerNamespace(t *testing.T) {
         assert.Equal(t, test.FakeArgoCDNamespace, updatedApp.Status.ControllerNamespace)
     })
 }
+
+func TestHelmValuesObjectHasReplaceStrategy(t *testing.T) {
+    app := v1alpha1.Application{
+        Status: v1alpha1.ApplicationStatus{Sync: v1alpha1.SyncStatus{ComparedTo: v1alpha1.ComparedTo{
+            Source: v1alpha1.ApplicationSource{
+                Helm: &v1alpha1.ApplicationSourceHelm{
+                    ValuesObject: &runtime.RawExtension{
+                        Object: &unstructured.Unstructured{Object: map[string]interface{}{"key": []string{"value"}}},
+                    },
+                },
+            },
+        }}},
+    }
+
+    appModified := v1alpha1.Application{
+        Status: v1alpha1.ApplicationStatus{Sync: v1alpha1.SyncStatus{ComparedTo: v1alpha1.ComparedTo{
+            Source: v1alpha1.ApplicationSource{
+                Helm: &v1alpha1.ApplicationSourceHelm{
+                    ValuesObject: &runtime.RawExtension{
+                        Object: &unstructured.Unstructured{Object: map[string]interface{}{"key": []string{"value-modified1"}}},
+                    },
+                },
+            },
+        }}},
+    }
+
+    patch, _, err := createMergePatch(
+        app,
+        appModified)
+    require.NoError(t, err)
+    assert.Equal(t, `{"status":{"sync":{"comparedTo":{"source":{"helm":{"valuesObject":{"key":["value-modified1"]}}}}}}}`, string(patch))
+}
+
+func TestAppStatusIsReplaced(t *testing.T) {
+    original := &v1alpha1.ApplicationStatus{Sync: v1alpha1.SyncStatus{
+        ComparedTo: v1alpha1.ComparedTo{
+            Destination: v1alpha1.ApplicationDestination{
+                Server: "https://mycluster",
+            },
+        },
+    }}
+
+    updated := &v1alpha1.ApplicationStatus{Sync: v1alpha1.SyncStatus{
+        ComparedTo: v1alpha1.ComparedTo{
+            Destination: v1alpha1.ApplicationDestination{
+                Name: "mycluster",
+            },
+        },
+    }}
+
+    patchData, ok, err := createMergePatch(original, updated)
+
+    require.NoError(t, err)
+    require.True(t, ok)
+    patchObj := map[string]interface{}{}
+    require.NoError(t, json.Unmarshal(patchData, &patchObj))
+
+    val, has, err := unstructured.NestedFieldNoCopy(patchObj, "sync", "comparedTo", "destination", "server")
+    require.NoError(t, err)
+    require.True(t, has)
+    require.Nil(t, val)
+}
controller/cache/cache.go (60 changes)
@@ -29,9 +29,11 @@ import (
     "k8s.io/client-go/tools/cache"

     "github.com/argoproj/argo-cd/v2/controller/metrics"
+    "github.com/argoproj/argo-cd/v2/controller/sharding"
     "github.com/argoproj/argo-cd/v2/pkg/apis/application"
     appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
     "github.com/argoproj/argo-cd/v2/util/argo"
+    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
     "github.com/argoproj/argo-cd/v2/util/db"
     "github.com/argoproj/argo-cd/v2/util/env"
     logutils "github.com/argoproj/argo-cd/v2/util/log"
@@ -168,7 +170,7 @@ func NewLiveStateCache(
     kubectl kube.Kubectl,
     metricsServer *metrics.MetricsServer,
     onObjectUpdated ObjectUpdatedHandler,
-    clusterFilter func(cluster *appv1.Cluster) bool,
+    clusterSharding sharding.ClusterShardingCache,
     resourceTracking argo.ResourceTracking) LiveStateCache {

     return &liveStateCache{
@@ -179,7 +181,7 @@ func NewLiveStateCache(
         kubectl:          kubectl,
         settingsMgr:      settingsMgr,
         metricsServer:    metricsServer,
-        clusterFilter:    clusterFilter,
+        clusterSharding:  clusterSharding,
         resourceTracking: resourceTracking,
     }
 }
@@ -196,14 +198,15 @@ type cacheSettings struct {
 }

 type liveStateCache struct {
-    db               db.ArgoDB
-    appInformer      cache.SharedIndexInformer
-    onObjectUpdated  ObjectUpdatedHandler
-    kubectl          kube.Kubectl
-    settingsMgr      *settings.SettingsManager
-    metricsServer    *metrics.MetricsServer
-    clusterFilter    func(cluster *appv1.Cluster) bool
-    resourceTracking argo.ResourceTracking
+    db                   db.ArgoDB
+    appInformer          cache.SharedIndexInformer
+    onObjectUpdated      ObjectUpdatedHandler
+    kubectl              kube.Kubectl
+    settingsMgr          *settings.SettingsManager
+    metricsServer        *metrics.MetricsServer
+    clusterSharding      sharding.ClusterShardingCache
+    resourceTracking     argo.ResourceTracking
+    ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts

     clusters      map[string]clustercache.ClusterCache
     cacheSettings cacheSettings
@@ -287,7 +290,8 @@ func isRootAppNode(r *clustercache.Resource) bool {
 }

 func getApp(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource) string {
-    return getAppRecursive(r, ns, map[kube.ResourceKey]bool{})
+    name, _ := getAppRecursive(r, ns, map[kube.ResourceKey]bool{})
+    return name
 }

 func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
@@ -298,27 +302,31 @@ func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
     return gv
 }

-func getAppRecursive(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource, visited map[kube.ResourceKey]bool) string {
+func getAppRecursive(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource, visited map[kube.ResourceKey]bool) (string, bool) {
     if !visited[r.ResourceKey()] {
         visited[r.ResourceKey()] = true
     } else {
         log.Warnf("Circular dependency detected: %v.", visited)
-        return resInfo(r).AppName
+        return resInfo(r).AppName, false
     }

     if resInfo(r).AppName != "" {
-        return resInfo(r).AppName
+        return resInfo(r).AppName, true
     }
     for _, ownerRef := range r.OwnerRefs {
         gv := ownerRefGV(ownerRef)
         if parent, ok := ns[kube.NewResourceKey(gv.Group, ownerRef.Kind, r.Ref.Namespace, ownerRef.Name)]; ok {
-            app := getAppRecursive(parent, ns, visited)
-            if app != "" {
-                return app
+            visited_branch := make(map[kube.ResourceKey]bool, len(visited))
+            for k, v := range visited {
+                visited_branch[k] = v
+            }
+            app, ok := getAppRecursive(parent, ns, visited_branch)
+            if app != "" || !ok {
+                return app, ok
             }
         }
     }
-    return ""
+    return "", true
 }

 var (
@@ -486,7 +494,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
     gvk := un.GroupVersionKind()

     if cacheSettings.ignoreResourceUpdatesEnabled && shouldHashManifest(appName, gvk) {
-        hash, err := generateManifestHash(un, nil, cacheSettings.resourceOverrides)
+        hash, err := generateManifestHash(un, nil, cacheSettings.resourceOverrides, c.ignoreNormalizerOpts)
         if err != nil {
             log.Errorf("Failed to generate manifest hash: %v", err)
         } else {
@@ -722,22 +730,24 @@ func (c *liveStateCache) Run(ctx context.Context) error {
 }

 func (c *liveStateCache) canHandleCluster(cluster *appv1.Cluster) bool {
-    if c.clusterFilter == nil {
-        return true
-    }
-    return c.clusterFilter(cluster)
+    return c.clusterSharding.IsManagedCluster(cluster)
 }

 func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
+    c.clusterSharding.Add(cluster)
     if !c.canHandleCluster(cluster) {
         log.Infof("Ignoring cluster %s", cluster.Server)
         return
     }

     c.lock.Lock()
     _, ok := c.clusters[cluster.Server]
     c.lock.Unlock()
     if !ok {
         log.Debugf("Checking if cache %v / cluster %v has appInformer %v", c, cluster, c.appInformer)
         if c.appInformer == nil {
             log.Warn("Cannot get a cluster appInformer. Cache may not be started this time")
             return
         }
         if c.isClusterHasApps(c.appInformer.GetStore().List(), cluster) {
             go func() {
                 // warm up cache for cluster with apps
@@ -748,6 +758,7 @@ func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
 }

 func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *appv1.Cluster) {
+    c.clusterSharding.Update(oldCluster, newCluster)
     c.lock.Lock()
     cluster, ok := c.clusters[newCluster.Server]
     c.lock.Unlock()
@@ -790,6 +801,7 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a

 func (c *liveStateCache) handleDeleteEvent(clusterServer string) {
     c.lock.RLock()
+    c.clusterSharding.Delete(clusterServer)
     cluster, ok := c.clusters[clusterServer]
     c.lock.RUnlock()
     if ok {
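The three event handlers above keep the sharding cache coherent before any filtering decision is made. A minimal sketch of the consumption pattern this enables; handleClusterEvent is a hypothetical name, while the method calls come from the ClusterShardingCache interface introduced at the end of this diff:

    // On cluster events, record the cluster first so the shard distribution
    // stays accurate, then ask whether this replica owns it before doing work.
    func handleClusterEvent(shardingCache sharding.ClusterShardingCache, cluster *v1alpha1.Cluster) {
        shardingCache.Add(cluster)
        if !shardingCache.IsManagedCluster(cluster) {
            return // another shard is responsible for this cluster
        }
        // warm caches, start watches, etc.
    }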
controller/cache/cache_test.go (261 changes)
@@ -18,10 +18,15 @@ import (
     "github.com/argoproj/gitops-engine/pkg/cache"
     "github.com/argoproj/gitops-engine/pkg/cache/mocks"
     "github.com/argoproj/gitops-engine/pkg/health"
+    "github.com/argoproj/gitops-engine/pkg/utils/kube"
     "github.com/stretchr/testify/mock"
     "k8s.io/client-go/kubernetes/fake"

+    "github.com/argoproj/argo-cd/v2/common"
     "github.com/argoproj/argo-cd/v2/controller/metrics"
+    "github.com/argoproj/argo-cd/v2/controller/sharding"
     appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+    dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
     argosettings "github.com/argoproj/argo-cd/v2/util/settings"
 )

@@ -35,11 +40,13 @@ func TestHandleModEvent_HasChanges(t *testing.T) {
     clusterCache := &mocks.ClusterCache{}
     clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
     clusterCache.On("EnsureSynced").Return(nil).Once()

+    db := &dbmocks.ArgoDB{}
+    db.On("GetApplicationControllerReplicas").Return(1)
     clustersCache := liveStateCache{
         clusters: map[string]cache.ClusterCache{
             "https://mycluster": clusterCache,
         },
+        clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
     }

     clustersCache.handleModEvent(&appv1.Cluster{
@@ -56,14 +63,22 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
     clusterCache := &mocks.ClusterCache{}
     clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
     clusterCache.On("EnsureSynced").Return(nil).Once()

+    db := &dbmocks.ArgoDB{}
+    db.On("GetApplicationControllerReplicas").Return(1)
     clustersCache := liveStateCache{
-        clusters: map[string]cache.ClusterCache{
-            "https://mycluster": clusterCache,
-        },
-        clusterFilter: func(cluster *appv1.Cluster) bool {
-            return false
+        db:          nil,
+        appInformer: nil,
+        onObjectUpdated: func(managedByApp map[string]bool, ref v1.ObjectReference) {
         },
+        kubectl:       nil,
+        settingsMgr:   &argosettings.SettingsManager{},
+        metricsServer: &metrics.MetricsServer{},
+        // returns a shard that never process any cluster
+        clusterSharding:  sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
+        resourceTracking: nil,
+        clusters:         map[string]cache.ClusterCache{"https://mycluster": clusterCache},
+        cacheSettings:    cacheSettings{},
+        lock:             sync.RWMutex{},
     }

     clustersCache.handleModEvent(&appv1.Cluster{
@@ -75,18 +90,20 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
         Namespaces: []string{"default"},
     })

-    assert.Len(t, clustersCache.clusters, 0)
+    assert.Len(t, clustersCache.clusters, 1)
 }

 func TestHandleModEvent_NoChanges(t *testing.T) {
     clusterCache := &mocks.ClusterCache{}
     clusterCache.On("Invalidate", mock.Anything).Panic("should not invalidate")
     clusterCache.On("EnsureSynced").Return(nil).Panic("should not re-sync")

+    db := &dbmocks.ArgoDB{}
+    db.On("GetApplicationControllerReplicas").Return(1)
     clustersCache := liveStateCache{
         clusters: map[string]cache.ClusterCache{
             "https://mycluster": clusterCache,
         },
+        clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
     }

     clustersCache.handleModEvent(&appv1.Cluster{
@@ -99,11 +116,11 @@ func TestHandleModEvent_NoChanges(t *testing.T) {
 }

 func TestHandleAddEvent_ClusterExcluded(t *testing.T) {
+    db := &dbmocks.ArgoDB{}
+    db.On("GetApplicationControllerReplicas").Return(1)
     clustersCache := liveStateCache{
-        clusters: map[string]cache.ClusterCache{},
-        clusterFilter: func(cluster *appv1.Cluster) bool {
-            return false
-        },
+        clusters:        map[string]cache.ClusterCache{},
+        clusterSharding: sharding.NewClusterSharding(db, 0, 2, common.DefaultShardingAlgorithm),
     }
     clustersCache.handleAddEvent(&appv1.Cluster{
         Server: "https://mycluster",
@@ -118,6 +135,8 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
         Server: "https://mycluster",
         Config: appv1.ClusterConfig{Username: "bar"},
     }
+    db := &dbmocks.ArgoDB{}
+    db.On("GetApplicationControllerReplicas").Return(1)
     fakeClient := fake.NewSimpleClientset()
     settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
     liveStateCacheLock := sync.RWMutex{}
@@ -126,10 +145,8 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
         clusters: map[string]cache.ClusterCache{
             testCluster.Server: gitopsEngineClusterCache,
         },
-        clusterFilter: func(cluster *appv1.Cluster) bool {
-            return true
-        },
-        settingsMgr: settingsMgr,
+        clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
+        settingsMgr:     settingsMgr,
         // Set the lock here so we can reference it later
         // nolint We need to overwrite here to have access to the lock
         lock: liveStateCacheLock,
@@ -303,6 +320,216 @@ func Test_asResourceNode_owner_refs(t *testing.T) {
     assert.Equal(t, expected, resNode)
 }

+func Test_getAppRecursive(t *testing.T) {
+    for _, tt := range []struct {
+        name     string
+        r        *cache.Resource
+        ns       map[kube.ResourceKey]*cache.Resource
+        wantName string
+        wantOK   assert.BoolAssertionFunc
+    }{
+        {
+            name: "ok: cm1->app1",
+            r: &cache.Resource{
+                Ref: v1.ObjectReference{
+                    Name: "cm1",
+                },
+                OwnerRefs: []metav1.OwnerReference{
+                    {Name: "app1"},
+                },
+            },
+            ns: map[kube.ResourceKey]*cache.Resource{
+                kube.NewResourceKey("", "", "", "app1"): {
+                    Info: &ResourceInfo{
+                        AppName: "app1",
+                    },
+                },
+            },
+            wantName: "app1",
+            wantOK:   assert.True,
+        },
+        {
+            name: "ok: cm1->cm2->app1",
+            r: &cache.Resource{
+                Ref: v1.ObjectReference{
+                    Name: "cm1",
+                },
+                OwnerRefs: []metav1.OwnerReference{
+                    {Name: "cm2"},
+                },
+            },
+            ns: map[kube.ResourceKey]*cache.Resource{
+                kube.NewResourceKey("", "", "", "cm2"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm2",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "app1"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "app1"): {
+                    Info: &ResourceInfo{
+                        AppName: "app1",
+                    },
+                },
+            },
+            wantName: "app1",
+            wantOK:   assert.True,
+        },
+        {
+            name: "cm1->cm2->app1 & cm1->cm3->app1",
+            r: &cache.Resource{
+                Ref: v1.ObjectReference{
+                    Name: "cm1",
+                },
+                OwnerRefs: []metav1.OwnerReference{
+                    {Name: "cm2"},
+                    {Name: "cm3"},
+                },
+            },
+            ns: map[kube.ResourceKey]*cache.Resource{
+                kube.NewResourceKey("", "", "", "cm2"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm2",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "app1"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "cm3"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm3",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "app1"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "app1"): {
+                    Info: &ResourceInfo{
+                        AppName: "app1",
+                    },
+                },
+            },
+            wantName: "app1",
+            wantOK:   assert.True,
+        },
+        {
+            // Nothing cycle.
+            // Issue #11699, fixed #12667.
+            name: "ok: cm1->cm2 & cm1->cm3->cm2 & cm1->cm3->app1",
+            r: &cache.Resource{
+                Ref: v1.ObjectReference{
+                    Name: "cm1",
+                },
+                OwnerRefs: []metav1.OwnerReference{
+                    {Name: "cm2"},
+                    {Name: "cm3"},
+                },
+            },
+            ns: map[kube.ResourceKey]*cache.Resource{
+                kube.NewResourceKey("", "", "", "cm2"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm2",
+                    },
+                },
+                kube.NewResourceKey("", "", "", "cm3"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm3",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "cm2"},
+                        {Name: "app1"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "app1"): {
+                    Info: &ResourceInfo{
+                        AppName: "app1",
+                    },
+                },
+            },
+            wantName: "app1",
+            wantOK:   assert.True,
+        },
+        {
+            name: "cycle: cm1<->cm2",
+            r: &cache.Resource{
+                Ref: v1.ObjectReference{
+                    Name: "cm1",
+                },
+                OwnerRefs: []metav1.OwnerReference{
+                    {Name: "cm2"},
+                },
+            },
+            ns: map[kube.ResourceKey]*cache.Resource{
+                kube.NewResourceKey("", "", "", "cm1"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm1",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "cm2"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "cm2"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm2",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "cm1"},
+                    },
+                },
+            },
+            wantName: "",
+            wantOK:   assert.False,
+        },
+        {
+            name: "cycle: cm1->cm2->cm3->cm1",
+            r: &cache.Resource{
+                Ref: v1.ObjectReference{
+                    Name: "cm1",
+                },
+                OwnerRefs: []metav1.OwnerReference{
+                    {Name: "cm2"},
+                },
+            },
+            ns: map[kube.ResourceKey]*cache.Resource{
+                kube.NewResourceKey("", "", "", "cm1"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm1",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "cm2"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "cm2"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm2",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "cm3"},
+                    },
+                },
+                kube.NewResourceKey("", "", "", "cm3"): {
+                    Ref: v1.ObjectReference{
+                        Name: "cm3",
+                    },
+                    OwnerRefs: []metav1.OwnerReference{
+                        {Name: "cm1"},
+                    },
+                },
+            },
+            wantName: "",
+            wantOK:   assert.False,
+        },
+    } {
+        t.Run(tt.name, func(t *testing.T) {
+            visited := map[kube.ResourceKey]bool{}
+            got, ok := getAppRecursive(tt.r, tt.ns, visited)
+            assert.Equal(t, tt.wantName, got)
+            tt.wantOK(t, ok)
+        })
+    }
+}

 func TestSkipResourceUpdate(t *testing.T) {
     var (
         hash1_x string = "x"
controller/cache/info.go (4 changes)
@@ -408,8 +408,8 @@ func populateHostNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {
     }
 }

-func generateManifestHash(un *unstructured.Unstructured, ignores []v1alpha1.ResourceIgnoreDifferences, overrides map[string]v1alpha1.ResourceOverride) (string, error) {
-    normalizer, err := normalizers.NewIgnoreNormalizer(ignores, overrides)
+func generateManifestHash(un *unstructured.Unstructured, ignores []v1alpha1.ResourceIgnoreDifferences, overrides map[string]v1alpha1.ResourceOverride, opts normalizers.IgnoreNormalizerOpts) (string, error) {
+    normalizer, err := normalizers.NewIgnoreNormalizer(ignores, overrides, opts)
     if err != nil {
         return "", fmt.Errorf("error creating normalizer: %w", err)
     }
controller/cache/info_test.go (3 changes)
@@ -16,6 +16,7 @@ import (
     "sigs.k8s.io/yaml"

     "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
 )

 func strToUnstructured(jsonStr string) *unstructured.Unstructured {
@@ -749,7 +750,7 @@ func TestManifestHash(t *testing.T) {

     expected := hash(data)

-    hash, err := generateManifestHash(manifest, ignores, nil)
+    hash, err := generateManifestHash(manifest, ignores, nil, normalizers.IgnoreNormalizerOpts{})
     assert.Equal(t, expected, hash)
     assert.Nil(t, err)
 }
@@ -23,6 +23,8 @@ import (
     "github.com/argoproj/argo-cd/v2/util/git"
     "github.com/argoproj/argo-cd/v2/util/healthz"
     "github.com/argoproj/argo-cd/v2/util/profile"
+
+    ctrl_metrics "sigs.k8s.io/controller-runtime/pkg/metrics"
 )

 type MetricsServer struct {
@@ -160,12 +162,12 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil

     mux := http.NewServeMux()
     registry := NewAppRegistry(appLister, appFilter, appLabels)
-    registry.MustRegister(depth, adds, latency, workDuration, unfinished, longestRunningProcessor, retries)

     mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{
         // contains app controller specific metrics
         registry,
         // contains process, golang and controller workqueues metrics
         prometheus.DefaultGatherer,
+        // contains workqueue metrics, process and golang metrics
+        ctrl_metrics.Registry,
     }, promhttp.HandlerOpts{}))
     profile.RegisterProfiler(mux)
     healthz.ServeHealthCheck(mux, healthCheck)
@@ -2,6 +2,7 @@ package metrics

 import (
     "context"
+    "fmt"
     "log"
     "net/http"
     "net/http/httptest"
@@ -15,12 +16,15 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/tools/cache"
+    "k8s.io/client-go/util/workqueue"
     "sigs.k8s.io/yaml"

     argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
     appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
     appinformer "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
     applister "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
+
+    "sigs.k8s.io/controller-runtime/pkg/controller"
 )

 const fakeApp = `
@@ -140,6 +144,12 @@ var appFilter = func(obj interface{}) bool {
     return true
 }

+func init() {
+    // Create a fake controller so we initialize the internal controller metrics.
+    // https://github.com/kubernetes-sigs/controller-runtime/blob/4000e996a202917ad7d40f02ed8a2079a9ce25e9/pkg/internal/controller/metrics/metrics.go
+    _, _ = controller.New("test-controller", nil, controller.Options{})
+}
+
 func newFakeApp(fakeAppYAML string) *argoappv1.Application {
     var app argoappv1.Application
     err := yaml.Unmarshal([]byte(fakeAppYAML), &app)
@@ -360,7 +370,7 @@ func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
         if line == "" {
             continue
         }
-        assert.Contains(t, body, line, "expected metrics mismatch")
+        assert.Contains(t, body, line, fmt.Sprintf("expected metrics mismatch for line: %s", line))
     }
 }

@@ -443,3 +453,70 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa
     err = metricsServ.SetExpiration(time.Second)
     assert.Error(t, err)
 }
+
+func TestWorkqueueMetrics(t *testing.T) {
+    cancel, appLister := newFakeLister()
+    defer cancel()
+    metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
+    assert.NoError(t, err)
+
+    expectedMetrics := `
+# TYPE workqueue_adds_total counter
+workqueue_adds_total{name="test"}
+
+# TYPE workqueue_depth gauge
+workqueue_depth{name="test"}
+
+# TYPE workqueue_longest_running_processor_seconds gauge
+workqueue_longest_running_processor_seconds{name="test"}
+
+# TYPE workqueue_queue_duration_seconds histogram
+
+# TYPE workqueue_unfinished_work_seconds gauge
+workqueue_unfinished_work_seconds{name="test"}
+
+# TYPE workqueue_work_duration_seconds histogram
+`
+    workqueue.NewNamed("test")
+
+    req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
+    assert.NoError(t, err)
+    rr := httptest.NewRecorder()
+    metricsServ.Handler.ServeHTTP(rr, req)
+    assert.Equal(t, rr.Code, http.StatusOK)
+    body := rr.Body.String()
+    log.Println(body)
+    assertMetricsPrinted(t, expectedMetrics, body)
+}
+
+func TestGoMetrics(t *testing.T) {
+    cancel, appLister := newFakeLister()
+    defer cancel()
+    metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
+    assert.NoError(t, err)
+
+    expectedMetrics := `
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds_sum
+go_gc_duration_seconds_count
+# TYPE go_goroutines gauge
+go_goroutines
+# TYPE go_info gauge
+go_info
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes
+# TYPE go_threads gauge
+go_threads
+`
+
+    req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
+    assert.NoError(t, err)
+    rr := httptest.NewRecorder()
+    metricsServ.Handler.ServeHTTP(rr, req)
+    assert.Equal(t, rr.Code, http.StatusOK)
+    body := rr.Body.String()
+    log.Println(body)
+    assertMetricsPrinted(t, expectedMetrics, body)
+}
@@ -1,101 +0,0 @@
-package metrics
-
-import (
-    "github.com/prometheus/client_golang/prometheus"
-    "k8s.io/client-go/util/workqueue"
-)
-
-const (
-    WorkQueueSubsystem         = "workqueue"
-    DepthKey                   = "depth"
-    AddsKey                    = "adds_total"
-    QueueLatencyKey            = "queue_duration_seconds"
-    WorkDurationKey            = "work_duration_seconds"
-    UnfinishedWorkKey          = "unfinished_work_seconds"
-    LongestRunningProcessorKey = "longest_running_processor_seconds"
-    RetriesKey                 = "retries_total"
-)
-
-var (
-    depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      DepthKey,
-        Help:      "Current depth of workqueue",
-    }, []string{"name"})
-
-    adds = prometheus.NewCounterVec(prometheus.CounterOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      AddsKey,
-        Help:      "Total number of adds handled by workqueue",
-    }, []string{"name"})
-
-    latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      QueueLatencyKey,
-        Help:      "How long in seconds an item stays in workqueue before being requested",
-        Buckets:   []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180},
-    }, []string{"name"})
-
-    workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      WorkDurationKey,
-        Help:      "How long in seconds processing an item from workqueue takes.",
-        Buckets:   []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180},
-    }, []string{"name"})
-
-    unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      UnfinishedWorkKey,
-        Help: "How many seconds of work has been done that " +
-            "is in progress and hasn't been observed by work_duration. Large " +
-            "values indicate stuck threads. One can deduce the number of stuck " +
-            "threads by observing the rate at which this increases.",
-    }, []string{"name"})
-
-    longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      LongestRunningProcessorKey,
-        Help: "How many seconds has the longest running " +
-            "processor for workqueue been running.",
-    }, []string{"name"})
-
-    retries = prometheus.NewCounterVec(prometheus.CounterOpts{
-        Subsystem: WorkQueueSubsystem,
-        Name:      RetriesKey,
-        Help:      "Total number of retries handled by workqueue",
-    }, []string{"name"})
-)
-
-func init() {
-    workqueue.SetProvider(workqueueMetricsProvider{})
-}
-
-type workqueueMetricsProvider struct{}
-
-func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
-    return depth.WithLabelValues(name)
-}
-
-func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
-    return adds.WithLabelValues(name)
-}
-
-func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
-    return latency.WithLabelValues(name)
-}
-
-func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
-    return workDuration.WithLabelValues(name)
-}
-
-func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
-    return unfinished.WithLabelValues(name)
-}
-
-func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
-    return longestRunningProcessor.WithLabelValues(name)
-}
-
-func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
-    return retries.WithLabelValues(name)
-}
185 controller/sharding/cache.go Normal file
@@ -0,0 +1,185 @@
package sharding

import (
	"sync"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/db"
	log "github.com/sirupsen/logrus"
)

type ClusterShardingCache interface {
	Init(clusters *v1alpha1.ClusterList)
	Add(c *v1alpha1.Cluster)
	Delete(clusterServer string)
	Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster)
	IsManagedCluster(c *v1alpha1.Cluster) bool
	GetDistribution() map[string]int
}

type ClusterSharding struct {
	Shard           int
	Replicas        int
	Shards          map[string]int
	Clusters        map[string]*v1alpha1.Cluster
	lock            sync.RWMutex
	getClusterShard DistributionFunction
}

func NewClusterSharding(_ db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
	log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
	clusterSharding := &ClusterSharding{
		Shard:    shard,
		Replicas: replicas,
		Shards:   make(map[string]int),
		Clusters: make(map[string]*v1alpha1.Cluster),
	}
	distributionFunction := NoShardingDistributionFunction()
	if replicas > 1 {
		log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
		distributionFunction = GetDistributionFunction(clusterSharding.GetClusterAccessor(), shardingAlgorithm, replicas)
	} else {
		log.Info("Processing all cluster shards")
	}
	clusterSharding.getClusterShard = distributionFunction
	return clusterSharding
}

// IsManagedCluster returns whether or not the cluster should be processed by a given shard.
func (s *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
	s.lock.RLock()
	defer s.lock.RUnlock()
	if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
		return true
	}
	clusterShard := 0
	if shard, ok := s.Shards[c.Server]; ok {
		clusterShard = shard
	} else {
		log.Warnf("The cluster %s has no assigned shard.", c.Server)
	}
	log.Debugf("Checking if cluster %s with clusterShard %d should be processed by shard %d", c.Server, clusterShard, s.Shard)
	return clusterShard == s.Shard
}

func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList) {
	sharding.lock.Lock()
	defer sharding.lock.Unlock()
	newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
	for _, c := range clusters.Items {
		cluster := c
		newClusters[c.Server] = &cluster
	}
	sharding.Clusters = newClusters
	sharding.updateDistribution()
}

func (sharding *ClusterSharding) Add(c *v1alpha1.Cluster) {
	sharding.lock.Lock()
	defer sharding.lock.Unlock()

	old, ok := sharding.Clusters[c.Server]
	sharding.Clusters[c.Server] = c
	if !ok || hasShardingUpdates(old, c) {
		sharding.updateDistribution()
	} else {
		log.Debugf("Skipping sharding distribution update. Cluster already added")
	}
}

func (sharding *ClusterSharding) Delete(clusterServer string) {
	sharding.lock.Lock()
	defer sharding.lock.Unlock()
	if _, ok := sharding.Clusters[clusterServer]; ok {
		delete(sharding.Clusters, clusterServer)
		delete(sharding.Shards, clusterServer)
		sharding.updateDistribution()
	}
}

func (sharding *ClusterSharding) Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster) {
	sharding.lock.Lock()
	defer sharding.lock.Unlock()

	if _, ok := sharding.Clusters[oldCluster.Server]; ok && oldCluster.Server != newCluster.Server {
		delete(sharding.Clusters, oldCluster.Server)
		delete(sharding.Shards, oldCluster.Server)
	}
	sharding.Clusters[newCluster.Server] = newCluster
	if hasShardingUpdates(oldCluster, newCluster) {
		sharding.updateDistribution()
	} else {
		log.Debugf("Skipping sharding distribution update. No relevant changes")
	}
}

func (sharding *ClusterSharding) GetDistribution() map[string]int {
	sharding.lock.RLock()
	defer sharding.lock.RUnlock()
	shards := sharding.Shards

	distribution := make(map[string]int, len(shards))
	for k, v := range shards {
		distribution[k] = v
	}
	return distribution
}

func (sharding *ClusterSharding) updateDistribution() {
	for k, c := range sharding.Clusters {
		shard := 0
		if c.Shard != nil {
			requestedShard := int(*c.Shard)
			if requestedShard < sharding.Replicas {
				shard = requestedShard
			} else {
				log.Warnf("Specified cluster shard (%d) for cluster: %s is greater than the number of available shards (%d). Using shard 0.", requestedShard, c.Server, sharding.Replicas)
			}
		} else {
			shard = sharding.getClusterShard(c)
		}

		existingShard, ok := sharding.Shards[k]
		if ok && existingShard != shard {
			log.Infof("Cluster %s has changed shard from %d to %d", k, existingShard, shard)
		} else if !ok {
			log.Infof("Cluster %s has been assigned to shard %d", k, shard)
		} else {
			log.Debugf("Cluster %s has not changed shard", k)
		}
		sharding.Shards[k] = shard
	}
}

// hasShardingUpdates returns true if the sharding distribution has explicitly changed
func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
	if old == nil || new == nil {
		return false
	}

	// returns true if the cluster id has changed because some sharding algorithms depend on it.
	if old.ID != new.ID {
		return true
	}

	if old.Server != new.Server {
		return true
	}

	// return false if the shard field has not been modified
	if old.Shard == nil && new.Shard == nil {
		return false
	}
	return old.Shard == nil || new.Shard == nil || int64(*old.Shard) != int64(*new.Shard)
}

func (d *ClusterSharding) GetClusterAccessor() clusterAccessor {
	return func() []*v1alpha1.Cluster {
		// no need to lock, as this is only called from the updateDistribution function
		clusters := make([]*v1alpha1.Cluster, 0, len(d.Clusters))
		for _, c := range d.Clusters {
			clusters = append(clusters, c)
		}
		return clusters
	}
}
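The cache above is what lets each controller replica answer "is this my cluster?" without re-querying the database on every event. A minimal usage sketch of the API it exposes; the shard/replica values and server URLs are illustrative, and nil is passed for the db argument since NewClusterSharding ignores it, as its signature shows.

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	// Shard 0 of a 2-replica controller, legacy (hash-by-ID) distribution.
	cache := sharding.NewClusterSharding(nil, 0, 2, "legacy")

	// Seed the cache with the clusters known at startup.
	cache.Init(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
		{ID: "1", Server: "https://kubernetes.default.svc"},
		{ID: "2", Server: "https://127.0.0.1:6443"},
	}})

	// Cluster secret events keep the cache, and hence the distribution, current.
	cache.Add(&v1alpha1.Cluster{ID: "3", Server: "https://10.0.0.1"})
	cache.Delete("https://127.0.0.1:6443")

	// A replica only reconciles clusters assigned to its own shard.
	c := &v1alpha1.Cluster{ID: "1", Server: "https://kubernetes.default.svc"}
	fmt.Println("managed:", cache.IsManagedCluster(c))
	fmt.Println("distribution:", cache.GetDistribution())
}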
475 controller/sharding/cache_test.go Normal file
@@ -0,0 +1,475 @@
package sharding

import (
	"testing"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
	"github.com/stretchr/testify/assert"
)

func setupTestSharding(shard int, replicas int) *ClusterSharding {
	shardingAlgorithm := "legacy" // we are using the legacy algorithm as it is deterministic based on the cluster id, which is easier to test
	db := &dbmocks.ArgoDB{}
	return NewClusterSharding(db, shard, replicas, shardingAlgorithm).(*ClusterSharding)
}

func TestNewClusterSharding(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	assert.NotNil(t, sharding)
	assert.Equal(t, shard, sharding.Shard)
	assert.Equal(t, replicas, sharding.Replicas)
	assert.NotNil(t, sharding.Shards)
	assert.NotNil(t, sharding.Clusters)
}

func TestClusterSharding_Add(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	clusterA := &v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
	}

	sharding.Add(clusterA)

	clusterB := v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}

	sharding.Add(&clusterB)

	distribution := sharding.GetDistribution()

	assert.Contains(t, sharding.Clusters, clusterA.Server)
	assert.Contains(t, sharding.Clusters, clusterB.Server)

	clusterDistribution, ok := distribution[clusterA.Server]
	assert.True(t, ok)
	assert.Equal(t, 1, clusterDistribution)

	myClusterDistribution, ok := distribution[clusterB.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, myClusterDistribution)

	assert.Equal(t, 2, len(distribution))
}

func TestClusterSharding_AddRoundRobin_Redistributes(t *testing.T) {
	shard := 1
	replicas := 2

	db := &dbmocks.ArgoDB{}

	sharding := NewClusterSharding(db, shard, replicas, "round-robin").(*ClusterSharding)

	clusterA := &v1alpha1.Cluster{
		ID:     "1",
		Server: "https://127.0.0.1:6443",
	}
	sharding.Add(clusterA)

	clusterB := v1alpha1.Cluster{
		ID:     "3",
		Server: "https://kubernetes.default.svc",
	}
	sharding.Add(&clusterB)

	distributionBefore := sharding.GetDistribution()

	assert.Contains(t, sharding.Clusters, clusterA.Server)
	assert.Contains(t, sharding.Clusters, clusterB.Server)

	clusterDistributionA, ok := distributionBefore[clusterA.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, clusterDistributionA)

	clusterDistributionB, ok := distributionBefore[clusterB.Server]
	assert.True(t, ok)
	assert.Equal(t, 1, clusterDistributionB)

	assert.Equal(t, 2, len(distributionBefore))

	clusterC := v1alpha1.Cluster{
		ID:     "2",
		Server: "https://1.1.1.1",
	}
	sharding.Add(&clusterC)

	distributionAfter := sharding.GetDistribution()

	assert.Contains(t, sharding.Clusters, clusterA.Server)
	assert.Contains(t, sharding.Clusters, clusterB.Server)
	assert.Contains(t, sharding.Clusters, clusterC.Server)

	clusterDistributionA, ok = distributionAfter[clusterA.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, clusterDistributionA)

	clusterDistributionC, ok := distributionAfter[clusterC.Server]
	assert.True(t, ok)
	assert.Equal(t, 1, clusterDistributionC) // will be assigned to shard 1 because the .ID is smaller than the "B" cluster

	clusterDistributionB, ok = distributionAfter[clusterB.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, clusterDistributionB) // will be reassigned to shard 0 because the .ID is bigger than the "C" cluster
}

func TestClusterSharding_Delete(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	sharding.Delete("https://kubernetes.default.svc")
	distribution := sharding.GetDistribution()
	assert.Equal(t, 1, len(distribution))
}

func TestClusterSharding_Update(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	distributionBefore := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionBefore))

	distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
	assert.True(t, ok)
	assert.Equal(t, 0, distributionA)

	sharding.Update(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}, &v1alpha1.Cluster{
		ID:     "4",
		Server: "https://kubernetes.default.svc",
	})

	distributionAfter := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionAfter))

	distributionA, ok = distributionAfter["https://kubernetes.default.svc"]
	assert.True(t, ok)
	assert.Equal(t, 1, distributionA)
}

func TestClusterSharding_UpdateServerName(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	distributionBefore := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionBefore))

	distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
	assert.True(t, ok)
	assert.Equal(t, 0, distributionA)

	sharding.Update(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}, &v1alpha1.Cluster{
		ID:     "1",
		Server: "https://server2",
	})

	distributionAfter := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionAfter))

	_, ok = distributionAfter["https://kubernetes.default.svc"]
	assert.False(t, ok) // the old server name should not be present anymore

	_, ok = distributionAfter["https://server2"]
	assert.True(t, ok) // the new server name should be present
}

func TestClusterSharding_IsManagedCluster(t *testing.T) {
	replicas := 2
	sharding0 := setupTestSharding(0, replicas)

	sharding0.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
			},
		},
	)

	assert.True(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}))

	assert.False(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
	}))

	sharding1 := setupTestSharding(1, replicas)

	sharding1.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	assert.False(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}))

	assert.True(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
	}))
}

func TestClusterSharding_ClusterShardOfResourceShouldNotBeChanged(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	Int64Ptr := func(i int64) *int64 {
		return &i
	}

	clusterWithNil := &v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
		Shard:  nil,
	}

	clusterWithValue := &v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
		Shard:  Int64Ptr(1),
	}

	clusterWithToBigValue := &v1alpha1.Cluster{
		ID:     "3",
		Server: "https://1.1.1.1",
		Shard:  Int64Ptr(999), // shard value is explicitly bigger than the number of replicas
	}

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				*clusterWithNil,
				*clusterWithValue,
				*clusterWithToBigValue,
			},
		},
	)
	distribution := sharding.GetDistribution()
	assert.Equal(t, 3, len(distribution))

	assert.Nil(t, sharding.Clusters[clusterWithNil.Server].Shard)

	assert.NotNil(t, sharding.Clusters[clusterWithValue.Server].Shard)
	assert.Equal(t, int64(1), *sharding.Clusters[clusterWithValue.Server].Shard)
	assert.Equal(t, 1, distribution[clusterWithValue.Server])

	assert.NotNil(t, sharding.Clusters[clusterWithToBigValue.Server].Shard)
	assert.Equal(t, int64(999), *sharding.Clusters[clusterWithToBigValue.Server].Shard)
	assert.Equal(t, 0, distribution[clusterWithToBigValue.Server]) // will be assigned to shard 0 because the value is bigger than the number of replicas
}

func TestHasShardingUpdates(t *testing.T) {
	Int64Ptr := func(i int64) *int64 {
		return &i
	}

	testCases := []struct {
		name     string
		old      *v1alpha1.Cluster
		new      *v1alpha1.Cluster
		expected bool
	}{
		{
			name: "No updates",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(1),
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(1),
			},
			expected: false,
		},
		{
			name: "Updates",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(1),
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
		{
			name: "Old is nil",
			old:  nil,
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: false,
		},
		{
			name: "New is nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			new:      nil,
			expected: false,
		},
		{
			name:     "Both are nil",
			old:      nil,
			new:      nil,
			expected: false,
		},
		{
			name: "Both shards are nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			expected: false,
		},
		{
			name: "Old shard is nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
		{
			name: "New shard is nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			expected: true,
		},
		{
			name: "Cluster ID has changed",
			old: &v1alpha1.Cluster{
				ID:     "1",
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			new: &v1alpha1.Cluster{
				ID:     "2",
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
		{
			name: "Server has changed",
			old: &v1alpha1.Cluster{
				ID:     "1",
				Server: "https://server1",
				Shard:  Int64Ptr(2),
			},
			new: &v1alpha1.Cluster{
				ID:     "1",
				Server: "https://server2",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, hasShardingUpdates(tc.old, tc.new))
		})
	}
}
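The redistribution asserted in TestClusterSharding_AddRoundRobin_Redistributes follows one rule: sort the known clusters by ID and give each its index modulo the replica count, so inserting a cluster with a smaller ID shifts every later cluster. A standalone sketch of just that rule; roundRobinShard is a hypothetical helper, and the manual Shard override and nil in-cluster cases handled upstream are omitted.

package main

import (
	"fmt"
	"sort"
)

// roundRobinShard sorts the known cluster IDs and assigns each its index
// modulo the replica count, mirroring the redistribution the tests expect.
func roundRobinShard(ids []string, id string, replicas int) int {
	if replicas <= 0 {
		return -1 // no replicas: no shard can own the cluster
	}
	sorted := append([]string(nil), ids...)
	sort.Strings(sorted)
	for i, candidate := range sorted {
		if candidate == id {
			return i % replicas
		}
	}
	return -1 // unknown cluster: no shard assigned yet
}

func main() {
	ids := []string{"1", "2", "3", "4", "5"}
	for _, id := range ids {
		// With 2 replicas: 1->0, 2->1, 3->0, 4->1, 5->0.
		fmt.Printf("cluster %s -> shard %d of 2\n", id, roundRobinShard(ids, id, 2))
	}
}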
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"hash/fnv"
	"math"
	"os"
	"sort"
	"strconv"
@@ -20,6 +21,7 @@ import (

	"github.com/argoproj/argo-cd/v2/util/db"
	"github.com/argoproj/argo-cd/v2/util/env"
	"github.com/argoproj/argo-cd/v2/util/errors"
	"github.com/argoproj/argo-cd/v2/util/settings"
	log "github.com/sirupsen/logrus"
	kubeerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -40,6 +42,7 @@ const ShardControllerMappingKey = "shardControllerMapping"

type DistributionFunction func(c *v1alpha1.Cluster) int
type ClusterFilterFunction func(c *v1alpha1.Cluster) bool
type clusterAccessor func() []*v1alpha1.Cluster

// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap.
// It also stores the heartbeat of last synced time of the application controller.
@@ -53,8 +56,7 @@ type shardApplicationControllerMapping struct {
// and returns whether or not the cluster should be processed by a given shard. It calls the distributionFunction
// to determine which shard will process the cluster, and if the given shard is equal to the calculated shard
// the function will return true.
func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, shard int) ClusterFilterFunction {
	replicas := db.GetApplicationControllerReplicas()
func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, replicas, shard int) ClusterFilterFunction {
	return func(c *v1alpha1.Cluster) bool {
		clusterShard := 0
		if c != nil && c.Shard != nil {
@@ -73,14 +75,14 @@ func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, s

// GetDistributionFunction returns which DistributionFunction should be used based on the passed algorithm and
// the current data.
func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) DistributionFunction {
	log.Infof("Using filter function: %s", shardingAlgorithm)
	distributionFunction := LegacyDistributionFunction(db)
func GetDistributionFunction(clusters clusterAccessor, shardingAlgorithm string, replicasCount int) DistributionFunction {
	log.Debugf("Using filter function: %s", shardingAlgorithm)
	distributionFunction := LegacyDistributionFunction(replicasCount)
	switch shardingAlgorithm {
	case common.RoundRobinShardingAlgorithm:
		distributionFunction = RoundRobinDistributionFunction(db)
		distributionFunction = RoundRobinDistributionFunction(clusters, replicasCount)
	case common.LegacyShardingAlgorithm:
		distributionFunction = LegacyDistributionFunction(db)
		distributionFunction = LegacyDistributionFunction(replicasCount)
	default:
		log.Warnf("distribution type %s is not supported, defaulting to %s", shardingAlgorithm, common.DefaultShardingAlgorithm)
	}
@@ -92,15 +94,21 @@ func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) Distributio
// is lightweight and can be distributed easily, however, it does not ensure a homogeneous distribution as
// some shards may get assigned more clusters than others. It is the legacy function distribution that is
// kept for compatibility reasons
func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
	replicas := db.GetApplicationControllerReplicas()
func LegacyDistributionFunction(replicas int) DistributionFunction {
	return func(c *v1alpha1.Cluster) int {
		if replicas == 0 {
			log.Debugf("Replicas count is : %d, returning -1", replicas)
			return -1
		}
		if c == nil {
			log.Debug("In-cluster: returning 0")
			return 0
		}
		// if Shard is manually set and the assigned value is lower than the number of replicas,
		// then its value is returned, otherwise it is the default calculated value
		if c.Shard != nil && int(*c.Shard) < replicas {
			return int(*c.Shard)
		}
		id := c.ID
		log.Debugf("Calculating cluster shard for cluster id: %s", id)
		if id == "" {
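In the legacy path the shard is derived from the cluster ID alone, which is why the tests can predict assignments without any shared state. A sketch of the hashing step, assuming FNV-32a in line with the hash/fnv import above; legacyShard is a hypothetical stand-in that skips the empty-ID handling and manual Shard overrides of the upstream function.

package main

import (
	"fmt"
	"hash/fnv"
)

// legacyShard hashes the cluster ID with FNV-32a and takes the result
// modulo the replica count; a replica count of 0 means no shard can own it.
func legacyShard(clusterID string, replicas int) int {
	if replicas == 0 {
		return -1
	}
	h := fnv.New32a()
	_, _ = h.Write([]byte(clusterID))
	return int(h.Sum32() % uint32(replicas))
}

func main() {
	// With 2 replicas this yields 1->0, 2->1, 3->0, 4->1,
	// matching the even/odd split the sharding tests rely on.
	for _, id := range []string{"1", "2", "3", "4"} {
		fmt.Printf("cluster %s -> shard %d of 2\n", id, legacyShard(id, 2))
	}
}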
@@ -121,14 +129,19 @@ func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
// This function ensures a homogeneous distribution: each shard gets assigned the same number of
// clusters +/-1, but with the drawback of a reshuffling of clusters across shards in case of some changes
// in the cluster list
func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
	replicas := db.GetApplicationControllerReplicas()

func RoundRobinDistributionFunction(clusters clusterAccessor, replicas int) DistributionFunction {
	return func(c *v1alpha1.Cluster) int {
		if replicas > 0 {
			if c == nil { // in-cluster does not necessarily have a secret assigned. So we are receiving a nil cluster here.
				return 0
			}
			// if Shard is manually set and the assigned value is lower than the number of replicas,
			// then its value is returned, otherwise it is the default calculated value
			if c.Shard != nil && int(*c.Shard) < replicas {
				return int(*c.Shard)
			} else {
				clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(db)
				clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(clusters)
				clusterIndex, ok := clusterIndexdByClusterIdMap[c.ID]
				if !ok {
					log.Warnf("Cluster with id=%s not found in cluster map.", c.ID)
@@ -144,6 +157,12 @@ func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
	}
}

// NoShardingDistributionFunction returns a DistributionFunction that will process all clusters by shard 0;
// the function is created for API compatibility purposes and is not supposed to be activated.
func NoShardingDistributionFunction() DistributionFunction {
	return func(c *v1alpha1.Cluster) int { return 0 }
}

// InferShard extracts the shard index based on its hostname.
func InferShard() (int, error) {
	hostname, err := osHostnameFunction()
@@ -152,33 +171,29 @@ func InferShard() (int, error) {
	}
	parts := strings.Split(hostname, "-")
	if len(parts) == 0 {
		return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
		log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
		return 0, nil
	}
	shard, err := strconv.Atoi(parts[len(parts)-1])
	if err != nil {
		return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
		log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
		return 0, nil
	}
	return int(shard), nil
}

func getSortedClustersList(db db.ArgoDB) []v1alpha1.Cluster {
	ctx := context.Background()
	clustersList, dbErr := db.ListClusters(ctx)
	if dbErr != nil {
		log.Warnf("Error while querying clusters list from database: %v", dbErr)
		return []v1alpha1.Cluster{}
	}
	clusters := clustersList.Items
func getSortedClustersList(getCluster clusterAccessor) []*v1alpha1.Cluster {
	clusters := getCluster()
	sort.Slice(clusters, func(i, j int) bool {
		return clusters[i].ID < clusters[j].ID
	})
	return clusters
}

func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
	clusters := getSortedClustersList(db)
func createClusterIndexByClusterIdMap(getCluster clusterAccessor) map[string]int {
	clusters := getSortedClustersList(getCluster)
	log.Debugf("ClustersList has %d items", len(clusters))
	clusterById := make(map[string]v1alpha1.Cluster)
	clusterById := make(map[string]*v1alpha1.Cluster)
	clusterIndexedByClusterId := make(map[string]int)
	for i, cluster := range clusters {
		log.Debugf("Adding cluster with id=%s and name=%s to cluster's map", cluster.ID, cluster.Name)
@@ -193,8 +208,7 @@ func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
// The function takes the shard number from the environment variable (default value -1, if not set) and passes it to this function.
// If the shard value passed to this function is -1, that is, the shard was not set as an environment variable,
// we default the shard number to 0 for computing the default config map.
func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {

func GetOrUpdateShardFromConfigMap(kubeClient kubernetes.Interface, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
	hostname, err := osHostnameFunction()
	if err != nil {
		return -1, err
@@ -351,3 +365,59 @@ func getDefaultShardMappingData(replicas int) []shardApplicationControllerMappin
	}
	return shardMappingData
}

func GetClusterSharding(kubeClient kubernetes.Interface, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) (ClusterShardingCache, error) {
	var replicasCount int
	if enableDynamicClusterDistribution {
		applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
		appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})

		// if the app controller deployment is not found when dynamic cluster distribution is enabled, error out
		if err != nil {
			return nil, fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment: %v", err)
		}

		if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
			replicasCount = int(*appControllerDeployment.Spec.Replicas)
		} else {
			return nil, fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment replica count")
		}

	} else {
		replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
	}
	shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
	if replicasCount > 1 {
		// check for shard mapping using the configmap if application-controller is a deployment;
		// else use the existing logic to infer the shard from the pod name if application-controller is a statefulset
		if enableDynamicClusterDistribution {
			var err error
			// retry 3 times if we find a conflict while updating the shard mapping configMap.
			// If we still see conflicts after the retries, wait for the next iteration of the heartbeat process.
			for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
				shardNumber, err = GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
				if err != nil && !kubeerrors.IsConflict(err) {
					err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
					break
				}
				log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
			}
			errors.CheckError(err)
		} else {
			if shardNumber < 0 {
				var err error
				shardNumber, err = InferShard()
				errors.CheckError(err)
			}
			if shardNumber > replicasCount {
				log.Warnf("Calculated shard number %d is greater than the replicas count. Defaulting to 0", shardNumber)
				shardNumber = 0
			}
		}
	} else {
		log.Info("Processing all cluster shards")
		shardNumber = 0
	}
	db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
	return NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm), nil
}
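GetClusterSharding is the composition point: it resolves the replica count (from the deployment when dynamic distribution is enabled, otherwise from the environment), resolves the shard number, and hands both to NewClusterSharding. A hypothetical startup wiring under stated assumptions: a fake clientset stands in for a real cluster, the settings manager constructor comes from util/settings, and with no environment variables set the replica count defaults to 0 so the single shard processes everything.

package main

import (
	"context"
	"log"

	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/util/settings"
	kubefake "k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake clientset is used here purely for illustration.
	kubeClient := kubefake.NewSimpleClientset()
	settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, "argocd")

	// Static distribution: replicas and shard come from the
	// ARGOCD_CONTROLLER_REPLICAS / ARGOCD_CONTROLLER_SHARD environment
	// variables (or the pod hostname suffix) when dynamic distribution is off.
	cache, err := sharding.GetClusterSharding(kubeClient, settingsMgr, "round-robin", false)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("initial distribution: %v", cache.GetDistribution())
}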
@@ -1,36 +1,44 @@
|
||||
package sharding
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestGetShardByID_NotEmptyID(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "4"}))
|
||||
}
|
||||
|
||||
func TestGetShardByID_EmptyID(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(replicasCount)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, 0, shard)
|
||||
}
|
||||
|
||||
@@ -38,7 +46,7 @@ func TestGetShardByID_NoReplicas(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(0)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
@@ -46,16 +54,16 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunction(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(0)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
distributionFunction := RoundRobinDistributionFunction(clusters, 0)
|
||||
assert.Equal(t, -1, distributionFunction(nil))
|
||||
assert.Equal(t, -1, distributionFunction(&cluster1))
|
||||
assert.Equal(t, -1, distributionFunction(&cluster2))
|
||||
@@ -65,137 +73,112 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *tes
|
||||
}
|
||||
|
||||
func TestGetClusterFilterDefault(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
clusterAccessor, _, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 2
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterLegacy(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterUnknown(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "unknown")
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, "unknown"), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := GetDistributionFunction(clusterAccessor, "unknown", replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(nil))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "5")
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 5
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
filter := GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, 0, filter(nil))
|
||||
assert.Equal(t, 4, filter(&cluster1))
|
||||
assert.Equal(t, 1, filter(&cluster2))
|
||||
assert.Equal(t, 2, filter(&cluster3))
|
||||
assert.Equal(t, 2, filter(&cluster4))
|
||||
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
|
||||
cluster5 := &v1alpha1.Cluster{ID: "5", Shard: &fixedShard}
|
||||
clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
|
||||
filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(cluster5))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
cluster5.Shard = &fixedShard
|
||||
clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
|
||||
filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(nil))
|
||||
assert.False(t, filter(&cluster1))
|
||||
assert.True(t, filter(&cluster2))
|
||||
assert.False(t, filter(&cluster3))
|
||||
assert.True(t, filter(&cluster4))
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "4")
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 4
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
|
||||
filter := GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, filter(nil), 0)
|
||||
assert.Equal(t, filter(&cluster1), 0)
|
||||
assert.Equal(t, filter(&cluster2), 1)
|
||||
assert.Equal(t, filter(&cluster3), 2)
|
||||
assert.Equal(t, filter(&cluster4), 3)
|
||||
|
||||
// a cluster with a fixed shard should be processed by the specified exact
|
||||
// same shard unless the specified shard index is greater than the number of replicas.
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
var fixedShard int64 = 1
|
||||
cluster5 := v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
|
||||
clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
|
||||
clusterAccessor = getClusterAccessor(clusters)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&cluster5))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterLegacyHash(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "hash")
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&cluster1))
|
||||
assert.True(t, filter(&cluster2))
|
||||
assert.False(t, filter(&cluster3))
|
||||
assert.True(t, filter(&cluster4))
|
||||
|
||||
// a cluster with a fixed shard should be processed by the specified exact
|
||||
// same shard unless the specified shard index is greater than the number of replicas.
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
shardIndex := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
|
||||
t.Run("legacy", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
|
||||
shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, shardShouldProcessCluster(&cluster1))
|
||||
assert.True(t, shardShouldProcessCluster(&cluster2))
|
||||
assert.False(t, shardShouldProcessCluster(&cluster3))
|
||||
assert.True(t, shardShouldProcessCluster(&cluster4))
|
||||
assert.False(t, shardShouldProcessCluster(nil))
|
||||
})
|
||||
|
||||
t.Run("roundrobin", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
|
||||
shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, shardShouldProcessCluster(&cluster1))
|
||||
assert.True(t, shardShouldProcessCluster(&cluster2))
|
||||
assert.False(t, shardShouldProcessCluster(&cluster3))
|
||||
assert.True(t, shardShouldProcessCluster(&cluster4))
|
||||
assert.False(t, shardShouldProcessCluster(nil))
|
||||
})
|
||||
cluster5 = v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
|
||||
clusters = []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
|
||||
clusterAccessor = getClusterAccessor(clusters)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
|
||||
t.Run("replicas set to 1", func(t *testing.T) {
|
||||
db.On("GetApplicationControllerReplicas").Return(1).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster2))
|
||||
@@ -205,8 +188,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("replicas set to 2", func(t *testing.T) {
|
||||
db.On("GetApplicationControllerReplicas").Return(2).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
@@ -216,8 +200,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("replicas set to 3", func(t *testing.T) {
|
||||
db.On("GetApplicationControllerReplicas").Return(3).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
replicasCount := 3
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
@@ -233,17 +218,19 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumber
|
||||
// Initial tests where showing that under 1024 clusters, execution time was around 400ms
|
||||
// and for 4096 clusters, execution time was under 9s
|
||||
// The other implementation was giving almost linear time of 400ms up to 10'000 clusters
|
||||
db := dbmocks.ArgoDB{}
|
||||
clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{}}
|
||||
clusterPointers := []*v1alpha1.Cluster{}
|
||||
for i := 0; i < 2048; i++ {
|
||||
cluster := createCluster(fmt.Sprintf("cluster-%d", i), fmt.Sprintf("%d", i))
|
||||
clusterList.Items = append(clusterList.Items, cluster)
|
||||
clusterPointers = append(clusterPointers, &cluster)
|
||||
}
|
||||
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
distributionFunction := RoundRobinDistributionFunction(&db)
|
||||
for i, c := range clusterList.Items {
|
||||
assert.Equal(t, i%2, distributionFunction(&c))
|
||||
replicasCount := 2
|
||||
t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
|
||||
_, db, _, _, _, _, _ := createTestClusters()
|
||||
clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
for i, c := range clusterPointers {
|
||||
assert.Equal(t, i%2, distributionFunction(c))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -256,12 +243,15 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
    cluster5 := createCluster("cluster5", "5")
    cluster6 := createCluster("cluster6", "6")

    clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
    clusterAccessor := getClusterAccessor(clusters)

    clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}}
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)

    // Test with replicas set to 2
    db.On("GetApplicationControllerReplicas").Return(2)
    distributionFunction := RoundRobinDistributionFunction(&db)
    replicasCount := 2
    db.On("GetApplicationControllerReplicas").Return(replicasCount)
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    assert.Equal(t, 0, distributionFunction(nil))
    assert.Equal(t, 0, distributionFunction(&cluster1))
    assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -272,17 +262,20 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde

    // Now, the database knows cluster6. It should be assigned a proper shard
    clusterList.Items = append(clusterList.Items, cluster6)
    distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
    assert.Equal(t, 1, distributionFunction(&cluster6))

    // Now, we remove the last added cluster; it should be unassigned as well
    clusterList.Items = clusterList.Items[:len(clusterList.Items)-1]
    distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
    assert.Equal(t, -1, distributionFunction(&cluster6))
}

func TestGetShardByIndexModuloReplicasCountDistributionFunction(t *testing.T) {
    db, cluster1, cluster2, _, _, _ := createTestClusters()
    db.On("GetApplicationControllerReplicas").Return(2)
    distributionFunction := RoundRobinDistributionFunction(db)
    clusters, db, cluster1, cluster2, _, _, _ := createTestClusters()
    replicasCount := 2
    db.On("GetApplicationControllerReplicas").Return(replicasCount)
    distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)

    // Test that the function returns the correct shard for cluster1 and cluster2
    expectedShardForCluster1 := 0
@@ -315,14 +308,14 @@ func TestInferShard(t *testing.T) {

    osHostnameFunction = func() (string, error) { return "exampleshard", nil }
    _, err = InferShard()
    assert.NotNil(t, err)
    assert.Nil(t, err)

    osHostnameFunction = func() (string, error) { return "example-shard", nil }
    _, err = InferShard()
    assert.NotNil(t, err)
    assert.Nil(t, err)
}

func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
func createTestClusters() (clusterAccessor, *dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
    db := dbmocks.ArgoDB{}
    cluster1 := createCluster("cluster1", "1")
    cluster2 := createCluster("cluster2", "2")
@@ -330,10 +323,27 @@ func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster,
    cluster4 := createCluster("cluster4", "4")
    cluster5 := createCluster("cluster5", "5")

    clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}

    db.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
        cluster1, cluster2, cluster3, cluster4, cluster5,
    }}, nil)
    return &db, cluster1, cluster2, cluster3, cluster4, cluster5
    return getClusterAccessor(clusters), &db, cluster1, cluster2, cluster3, cluster4, cluster5
}

func getClusterAccessor(clusters []v1alpha1.Cluster) clusterAccessor {
    // Convert the array to a slice of pointers
    clusterPointers := getClusterPointers(clusters)
    clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
    return clusterAccessor
}

func getClusterPointers(clusters []v1alpha1.Cluster) []*v1alpha1.Cluster {
    var clusterPointers []*v1alpha1.Cluster
    for i := range clusters {
        clusterPointers = append(clusterPointers, &clusters[i])
    }
    return clusterPointers
}

func createCluster(name string, id string) v1alpha1.Cluster {
@@ -676,3 +686,187 @@ func Test_getOrUpdateShardNumberForController(t *testing.T) {
        })
    }
}

func TestGetClusterSharding(t *testing.T) {
    IntPtr := func(i int32) *int32 {
        return &i
    }

    deployment := &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name:      common.DefaultApplicationControllerName,
            Namespace: "argocd",
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: IntPtr(1),
        },
    }

    deploymentMultiReplicas := &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "argocd-application-controller-multi-replicas",
            Namespace: "argocd",
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: IntPtr(3),
        },
    }

    objects := append([]runtime.Object{}, deployment, deploymentMultiReplicas)
    kubeclientset := kubefake.NewSimpleClientset(objects...)

    settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "argocd", settings.WithRepoOrClusterChangedHandler(func() {
    }))

    testCases := []struct {
        name               string
        useDynamicSharding bool
        envsSetter         func(t *testing.T)
        cleanup            func()
        expectedShard      int
        expectedReplicas   int
        expectedErr        error
    }{
        {
            name: "Default sharding with statefulset",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvControllerReplicas, "1")
            },
            cleanup: func() {},
            useDynamicSharding: false,
            expectedShard: 0,
            expectedReplicas: 1,
            expectedErr: nil,
        },
        {
            name: "Default sharding with deployment",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
            },
            cleanup: func() {},
            useDynamicSharding: true,
            expectedShard: 0,
            expectedReplicas: 1,
            expectedErr: nil,
        },
        {
            name: "Default sharding with deployment and multiple replicas",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
            },
            cleanup: func() {},
            useDynamicSharding: true,
            expectedShard: 0,
            expectedReplicas: 3,
            expectedErr: nil,
        },
        {
            name: "Statefulset multiple replicas",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvControllerReplicas, "3")
                osHostnameFunction = func() (string, error) { return "example-shard-3", nil }
            },
            cleanup: func() {
                osHostnameFunction = os.Hostname
            },
            useDynamicSharding: false,
            expectedShard: 3,
            expectedReplicas: 3,
            expectedErr: nil,
        },
        {
            name: "Explicit shard with statefulset and 1 replica",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvControllerReplicas, "1")
                t.Setenv(common.EnvControllerShard, "3")
            },
            cleanup: func() {},
            useDynamicSharding: false,
            expectedShard: 0,
            expectedReplicas: 1,
            expectedErr: nil,
        },
        {
            name: "Explicit shard with statefulset and 2 replicas - and too high shard",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvControllerReplicas, "2")
                t.Setenv(common.EnvControllerShard, "3")
            },
            cleanup: func() {},
            useDynamicSharding: false,
            expectedShard: 0,
            expectedReplicas: 2,
            expectedErr: nil,
        },
        {
            name: "Explicit shard with statefulset and 2 replicas",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvControllerReplicas, "2")
                t.Setenv(common.EnvControllerShard, "1")
            },
            cleanup: func() {},
            useDynamicSharding: false,
            expectedShard: 1,
            expectedReplicas: 2,
            expectedErr: nil,
        },
        {
            name: "Explicit shard with deployment",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvControllerShard, "3")
            },
            cleanup: func() {},
            useDynamicSharding: true,
            expectedShard: 0,
            expectedReplicas: 1,
            expectedErr: nil,
        },
        {
            name: "Explicit shard with deployment and multiple replicas will read from configmap",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
                t.Setenv(common.EnvControllerShard, "3")
            },
            cleanup: func() {},
            useDynamicSharding: true,
            expectedShard: 0,
            expectedReplicas: 3,
            expectedErr: nil,
        },
        {
            name: "Dynamic sharding but missing deployment",
            envsSetter: func(t *testing.T) {
                t.Setenv(common.EnvAppControllerName, "missing-deployment")
            },
            cleanup: func() {},
            useDynamicSharding: true,
            expectedShard: 0,
            expectedReplicas: 1,
            expectedErr: fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: deployments.apps \"missing-deployment\" not found"),
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            tc.envsSetter(t)
            defer tc.cleanup()
            shardingCache, err := GetClusterSharding(kubeclientset, settingsMgr, "round-robin", tc.useDynamicSharding)

            if shardingCache != nil {
                clusterSharding := shardingCache.(*ClusterSharding)
                assert.Equal(t, tc.expectedShard, clusterSharding.Shard)
                assert.Equal(t, tc.expectedReplicas, clusterSharding.Replicas)
            }

            if tc.expectedErr != nil {
                if err != nil {
                    assert.Equal(t, tc.expectedErr.Error(), err.Error())
                } else {
                    t.Errorf("Expected error %v but got nil", tc.expectedErr)
                }
            } else {
                assert.Nil(t, err)
            }
        })
    }
}

@@ -3,6 +3,7 @@ package sharding
import (
    "fmt"
    "math"
    "strconv"
    "testing"

    "github.com/argoproj/argo-cd/v2/common"
@@ -22,9 +23,11 @@ func TestLargeShuffle(t *testing.T) {
        clusterList.Items = append(clusterList.Items, cluster)
    }
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)
    clusterAccessor := getClusterAccessor(clusterList.Items)
    // Test with replicas set to 256
    t.Setenv(common.EnvControllerReplicas, "256")
    distributionFunction := RoundRobinDistributionFunction(&db)
    replicasCount := 256
    t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    for i, c := range clusterList.Items {
        assert.Equal(t, i%2567, distributionFunction(&c))
    }
@@ -44,10 +47,11 @@ func TestShuffle(t *testing.T) {

    clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5, cluster6}}
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)

    clusterAccessor := getClusterAccessor(clusterList.Items)
    // Test with replicas set to 3
    t.Setenv(common.EnvControllerReplicas, "3")
    distributionFunction := RoundRobinDistributionFunction(&db)
    replicasCount := 3
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    assert.Equal(t, 0, distributionFunction(nil))
    assert.Equal(t, 0, distributionFunction(&cluster1))
    assert.Equal(t, 1, distributionFunction(&cluster2))

@@ -35,6 +35,7 @@ import (
    "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
    "github.com/argoproj/argo-cd/v2/util/argo"
    argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
    appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
    "github.com/argoproj/argo-cd/v2/util/db"
    "github.com/argoproj/argo-cd/v2/util/gpg"
@@ -117,6 +118,7 @@ type appStateManager struct {
    repoErrorCache goSync.Map
    repoErrorGracePeriod time.Duration
    serverSideDiff bool
    ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
}

// GetRepoObjs will generate the manifests for the given application delegating the
@@ -605,7 +607,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
    useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, serverSideDiff, logCtx)

    diffConfigBuilder := argodiff.NewDiffConfigBuilder().
        WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
        WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles, m.ignoreNormalizerOpts).
        WithTracking(appLabelKey, string(trackingMethod))

    if useDiffCache {
@@ -935,6 +937,7 @@ func NewAppStateManager(
    persistResourceHealth bool,
    repoErrorGracePeriod time.Duration,
    serverSideDiff bool,
    ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
) AppStateManager {
    return &appStateManager{
        liveStateCache: liveStateCache,
@@ -952,6 +955,7 @@
        persistResourceHealth: persistResourceHealth,
        repoErrorGracePeriod: repoErrorGracePeriod,
        serverSideDiff: serverSideDiff,
        ignoreNormalizerOpts: ignoreNormalizerOpts,
    }
}


@@ -2,7 +2,6 @@ package controller

import (
    "context"
    "encoding/json"
    goerrors "errors"
    "fmt"
    "os"
@@ -11,6 +10,7 @@ import (
    "time"

    cdcommon "github.com/argoproj/argo-cd/v2/common"
    "k8s.io/apimachinery/pkg/util/strategicpatch"

    "github.com/argoproj/gitops-engine/pkg/sync"
    "github.com/argoproj/gitops-engine/pkg/sync/common"
@@ -21,6 +21,7 @@ import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/managedfields"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/kubectl/pkg/util/openapi"

    "github.com/argoproj/argo-cd/v2/controller/metrics"
@@ -399,11 +400,10 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
        }
    }

// normalizeTargetResources will apply the diff normalization in all live and target resources.
// Then it calculates the merge patch between the normalized live and the current live resources.
// Finally it applies the merge patch in the normalized target resources. This is done to ensure
// that target resources have the same ignored diff fields values from live ones to avoid them to
// be applied in the cluster. Returns the list of normalized target resources.
// normalizeTargetResources modifies target resources to ensure ignored fields are not touched during synchronization:
// - applies normalization to the target resources based on the live resources
// - copies ignored fields from the matching live resources: apply normalizer to the live resource,
//   calculates the patch performed by normalizer and applies the patch to the target resource
func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructured, error) {
    // normalize live and target resources
    normalized, err := diff.Normalize(cr.reconciliationResult.Live, cr.reconciliationResult.Target, cr.diffConfig)
@@ -422,94 +422,35 @@ func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructure
            patchedTargets = append(patchedTargets, originalTarget)
            continue
        }
        // calculate targetPatch between normalized and target resource
        targetPatch, err := getMergePatch(normalizedTarget, originalTarget)

        var lookupPatchMeta *strategicpatch.PatchMetaFromStruct
        versionedObject, err := scheme.Scheme.New(normalizedTarget.GroupVersionKind())
        if err == nil {
            meta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
            if err != nil {
                return nil, err
            }
            lookupPatchMeta = &meta
        }

        livePatch, err := getMergePatch(normalized.Lives[idx], live, lookupPatchMeta)
        if err != nil {
            return nil, err
        }

        // check if there is a patch to apply. An empty patch is identified by a '{}' string.
        if len(targetPatch) > 2 {
            livePatch, err := getMergePatch(normalized.Lives[idx], live)
            if err != nil {
                return nil, err
            }
            // generate a minimal patch that uses the fields from targetPatch (template)
            // with livePatch values
            patch, err := compilePatch(targetPatch, livePatch)
            if err != nil {
                return nil, err
            }
            normalizedTarget, err = applyMergePatch(normalizedTarget, patch)
            if err != nil {
                return nil, err
            }
        } else {
            // if there is no patch just use the original target
            normalizedTarget = originalTarget
        normalizedTarget, err = applyMergePatch(normalizedTarget, livePatch, versionedObject)
        if err != nil {
            return nil, err
        }

        patchedTargets = append(patchedTargets, normalizedTarget)
    }
    return patchedTargets, nil
}

// compilePatch will generate a patch using the fields from templatePatch with
// the values from valuePatch.
func compilePatch(templatePatch, valuePatch []byte) ([]byte, error) {
    templateMap := make(map[string]interface{})
    err := json.Unmarshal(templatePatch, &templateMap)
    if err != nil {
        return nil, err
    }
    valueMap := make(map[string]interface{})
    err = json.Unmarshal(valuePatch, &valueMap)
    if err != nil {
        return nil, err
    }
    resultMap := intersectMap(templateMap, valueMap)
    return json.Marshal(resultMap)
}

// intersectMap will return map with the fields intersection from the 2 provided
// maps populated with the valueMap values.
func intersectMap(templateMap, valueMap map[string]interface{}) map[string]interface{} {
    result := make(map[string]interface{})
    for k, v := range templateMap {
        if innerTMap, ok := v.(map[string]interface{}); ok {
            if innerVMap, ok := valueMap[k].(map[string]interface{}); ok {
                result[k] = intersectMap(innerTMap, innerVMap)
            }
        } else if innerTSlice, ok := v.([]interface{}); ok {
            if innerVSlice, ok := valueMap[k].([]interface{}); ok {
                items := []interface{}{}
                for idx, innerTSliceValue := range innerTSlice {
                    if idx < len(innerVSlice) {
                        if tSliceValueMap, ok := innerTSliceValue.(map[string]interface{}); ok {
                            if vSliceValueMap, ok := innerVSlice[idx].(map[string]interface{}); ok {
                                item := intersectMap(tSliceValueMap, vSliceValueMap)
                                items = append(items, item)
                            }
                        } else {
                            items = append(items, innerVSlice[idx])
                        }
                    }
                }
                if len(items) > 0 {
                    result[k] = items
                }
            }
        } else {
            if _, ok := valueMap[k]; ok {
                result[k] = valueMap[k]
            }
        }
    }
    return result
}

// getMergePatch calculates and returns the patch between the original and the
// modified unstructures.
func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error) {
func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMeta *strategicpatch.PatchMetaFromStruct) ([]byte, error) {
    originalJSON, err := original.MarshalJSON()
    if err != nil {
        return nil, err
@@ -518,20 +459,30 @@ func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error
    if err != nil {
        return nil, err
    }
    if lookupPatchMeta != nil {
        return strategicpatch.CreateThreeWayMergePatch(modifiedJSON, modifiedJSON, originalJSON, lookupPatchMeta, true)
    }

    return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
}

// applyMergePatch will apply the given patch in the obj and return the patched
// unstructure.
func applyMergePatch(obj *unstructured.Unstructured, patch []byte) (*unstructured.Unstructured, error) {
func applyMergePatch(obj *unstructured.Unstructured, patch []byte, versionedObject interface{}) (*unstructured.Unstructured, error) {
    originalJSON, err := obj.MarshalJSON()
    if err != nil {
        return nil, err
    }
    patchedJSON, err := jsonpatch.MergePatch(originalJSON, patch)
    var patchedJSON []byte
    if versionedObject == nil {
        patchedJSON, err = jsonpatch.MergePatch(originalJSON, patch)
    } else {
        patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patch, versionedObject)
    }
    if err != nil {
        return nil, err
    }

    patchedObj := &unstructured.Unstructured{}
    _, _, err = unstructured.UnstructuredJSONScheme.Decode(patchedJSON, nil, patchedObj)
    if err != nil {

@@ -18,6 +18,7 @@ import (
    "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
    "github.com/argoproj/argo-cd/v2/test"
    "github.com/argoproj/argo-cd/v2/util/argo/diff"
    "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
)

func TestPersistRevisionHistory(t *testing.T) {
@@ -261,7 +262,7 @@ func TestNormalizeTargetResources(t *testing.T) {
    setup := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture {
        t.Helper()
        dc, err := diff.NewDiffConfigBuilder().
            WithDiffSettings(ignores, nil, true).
            WithDiffSettings(ignores, nil, true, normalizers.IgnoreNormalizerOpts{}).
            WithNoCache().
            Build()
        require.NoError(t, err)
@@ -386,3 +387,207 @@ func TestNormalizeTargetResources(t *testing.T) {
        assert.Equal(t, 2, len(containers))
    })
}

func TestNormalizeTargetResourcesWithList(t *testing.T) {
    type fixture struct {
        comparisonResult *comparisonResult
    }
    setupHttpProxy := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture {
        t.Helper()
        dc, err := diff.NewDiffConfigBuilder().
            WithDiffSettings(ignores, nil, true, normalizers.IgnoreNormalizerOpts{}).
            WithNoCache().
            Build()
        require.NoError(t, err)
        live := test.YamlToUnstructured(testdata.LiveHTTPProxy)
        target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
        return &fixture{
            &comparisonResult{
                reconciliationResult: sync.ReconciliationResult{
                    Live: []*unstructured.Unstructured{live},
                    Target: []*unstructured.Unstructured{target},
                },
                diffConfig: dc,
            },
        }
    }

    t.Run("will properly ignore nested fields within arrays", func(t *testing.T) {
        // given
        ignores := []v1alpha1.ResourceIgnoreDifferences{
            {
                Group: "projectcontour.io",
                Kind: "HTTPProxy",
                JQPathExpressions: []string{".spec.routes[]"},
                //JSONPointers: []string{"/spec/routes"},
            },
        }
        f := setupHttpProxy(t, ignores)
        target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
        f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}

        // when
        patchedTargets, err := normalizeTargetResources(f.comparisonResult)

        // then
        require.NoError(t, err)
        require.Equal(t, 1, len(f.comparisonResult.reconciliationResult.Live))
        require.Equal(t, 1, len(f.comparisonResult.reconciliationResult.Target))
        require.Equal(t, 1, len(patchedTargets))

        // live should have 1 entry
        require.Equal(t, 1, len(dig[[]any](f.comparisonResult.reconciliationResult.Live[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors"})))
        // assert some arbitrary field to show `entries[0]` is not an empty object
        require.Equal(t, "sample-header", dig[string](f.comparisonResult.reconciliationResult.Live[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries", 0, "requestHeader", "headerName"}))

        // target has 2 entries
        require.Equal(t, 2, len(dig[[]any](f.comparisonResult.reconciliationResult.Target[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries"})))
        // assert some arbitrary field to show `entries[0]` is not an empty object
        require.Equal(t, "sample-header", dig[string](f.comparisonResult.reconciliationResult.Target[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries", 0, "requestHeaderValueMatch", "headers", 0, "name"}))

        // There should be *1* entry in the array
        require.Equal(t, 1, len(dig[[]any](patchedTargets[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors"})))
        // and it should NOT equal an empty object
        require.Len(t, dig[any](patchedTargets[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries", 0}), 1)

    })
    t.Run("will correctly set array entries if new entries have been added", func(t *testing.T) {
        // given
        ignores := []v1alpha1.ResourceIgnoreDifferences{
            {
                Group: "apps",
                Kind: "Deployment",
                JQPathExpressions: []string{".spec.template.spec.containers[].env[] | select(.name == \"SOME_ENV_VAR\")"},
            },
        }
        f := setupHttpProxy(t, ignores)
        live := test.YamlToUnstructured(testdata.LiveDeploymentEnvVarsYaml)
        target := test.YamlToUnstructured(testdata.TargetDeploymentEnvVarsYaml)
        f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
        f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}

        // when
        targets, err := normalizeTargetResources(f.comparisonResult)

        // then
        require.NoError(t, err)
        require.Equal(t, 1, len(targets))
        containers, ok, err := unstructured.NestedSlice(targets[0].Object, "spec", "template", "spec", "containers")
        require.NoError(t, err)
        require.True(t, ok)
        assert.Equal(t, 1, len(containers))

        ports := containers[0].(map[string]interface{})["ports"].([]interface{})
        assert.Equal(t, 1, len(ports))

        env := containers[0].(map[string]interface{})["env"].([]interface{})
        assert.Equal(t, 3, len(env))

        first := env[0]
        second := env[1]
        third := env[2]

        // Currently the defined order is the insertion order of the target manifest.
        assert.Equal(t, "SOME_ENV_VAR", first.(map[string]interface{})["name"])
        assert.Equal(t, "some_value", first.(map[string]interface{})["value"])

        assert.Equal(t, "SOME_OTHER_ENV_VAR", second.(map[string]interface{})["name"])
        assert.Equal(t, "some_other_value", second.(map[string]interface{})["value"])

        assert.Equal(t, "YET_ANOTHER_ENV_VAR", third.(map[string]interface{})["name"])
        assert.Equal(t, "yet_another_value", third.(map[string]interface{})["value"])
    })

    t.Run("ignore-deployment-image-replicas-changes-additive", func(t *testing.T) {
        // given

        ignores := []v1alpha1.ResourceIgnoreDifferences{
            {
                Group: "apps",
                Kind: "Deployment",
                JSONPointers: []string{"/spec/replicas"},
            }, {
                Group: "apps",
                Kind: "Deployment",
                JQPathExpressions: []string{".spec.template.spec.containers[].image"},
            },
        }
        f := setupHttpProxy(t, ignores)
        live := test.YamlToUnstructured(testdata.MinimalImageReplicaDeploymentYaml)
        target := test.YamlToUnstructured(testdata.AdditionalImageReplicaDeploymentYaml)
        f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
        f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}

        // when
        targets, err := normalizeTargetResources(f.comparisonResult)

        // then
        require.NoError(t, err)
        require.Equal(t, 1, len(targets))
        metadata, ok, err := unstructured.NestedMap(targets[0].Object, "metadata")
        require.NoError(t, err)
        require.True(t, ok)
        labels, ok := metadata["labels"].(map[string]interface{})
        require.True(t, ok)
        assert.Equal(t, 2, len(labels))
        assert.Equal(t, "web", labels["appProcess"])

        spec, ok, err := unstructured.NestedMap(targets[0].Object, "spec")
        require.NoError(t, err)
        require.True(t, ok)

        assert.Equal(t, int64(1), spec["replicas"])

        template, ok := spec["template"].(map[string]interface{})
        require.True(t, ok)

        tMetadata, ok := template["metadata"].(map[string]interface{})
        require.True(t, ok)
        tLabels, ok := tMetadata["labels"].(map[string]interface{})
        require.True(t, ok)
        assert.Equal(t, 2, len(tLabels))
        assert.Equal(t, "web", tLabels["appProcess"])

        tSpec, ok := template["spec"].(map[string]interface{})
        require.True(t, ok)
        containers, ok, err := unstructured.NestedSlice(tSpec, "containers")
        require.NoError(t, err)
        require.True(t, ok)
        assert.Equal(t, 1, len(containers))

        first := containers[0].(map[string]interface{})
        assert.Equal(t, "alpine:3", first["image"])

        resources, ok := first["resources"].(map[string]interface{})
        require.True(t, ok)
        requests, ok := resources["requests"].(map[string]interface{})
        require.True(t, ok)
        assert.Equal(t, "400m", requests["cpu"])

        env, ok, err := unstructured.NestedSlice(first, "env")
        require.NoError(t, err)
        require.True(t, ok)
        assert.Equal(t, 1, len(env))

        env0 := env[0].(map[string]interface{})
        assert.Equal(t, "EV", env0["name"])
        assert.Equal(t, "here", env0["value"])
    })
}

func dig[T any](obj interface{}, path []interface{}) T {
    i := obj

    for _, segment := range path {
        switch segment.(type) {
        case int:
            i = i.([]interface{})[segment.(int)]
        case string:
            i = i.(map[string]interface{})[segment.(string)]
        default:
            panic("invalid path for object")
        }
    }

    return i.(T)
}

28
controller/testdata/additional-image-replicas-deployment.yaml
vendored
Normal file
@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: client
    appProcess: web
  name: client
spec:
  replicas: 2
  selector:
    matchLabels:
      app: client
  strategy: {}
  template:
    metadata:
      labels:
        app: client
        appProcess: web
    spec:
      containers:
      - image: alpine:2
        name: alpine
        resources:
          requests:
            cpu: 400m
        env:
        - name: EV
          value: here
18
controller/testdata/data.go
vendored
@@ -11,4 +11,22 @@ var (

    //go:embed target-deployment-new-entries.yaml
    TargetDeploymentNewEntries string

    //go:embed live-httpproxy.yaml
    LiveHTTPProxy string

    //go:embed target-httpproxy.yaml
    TargetHTTPProxy string

    //go:embed live-deployment-env-vars.yaml
    LiveDeploymentEnvVarsYaml string

    //go:embed target-deployment-env-vars.yaml
    TargetDeploymentEnvVarsYaml string

    //go:embed minimal-image-replicas-deployment.yaml
    MinimalImageReplicaDeploymentYaml string

    //go:embed additional-image-replicas-deployment.yaml
    AdditionalImageReplicaDeploymentYaml string
)

177
controller/testdata/live-deployment-env-vars.yaml
vendored
Normal file
@@ -0,0 +1,177 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui'
    deployment.kubernetes.io/revision: '9'
    iksm-version: '2.0'
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}}
  creationTimestamp: '2022-01-05T15:45:21Z'
  generation: 119
  managedFields:
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          'f:iksm-version': {}
    manager: janitor
    operation: Apply
    time: '2022-01-06T18:21:04Z'
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          .: {}
          'f:argocd.argoproj.io/tracking-id': {}
          'f:kubectl.kubernetes.io/last-applied-configuration': {}
      'f:spec':
        'f:progressDeadlineSeconds': {}
        'f:replicas': {}
        'f:revisionHistoryLimit': {}
        'f:selector': {}
        'f:strategy':
          'f:rollingUpdate':
            .: {}
            'f:maxSurge': {}
            'f:maxUnavailable': {}
          'f:type': {}
        'f:template':
          'f:metadata':
            'f:labels':
              .: {}
              'f:app': {}
          'f:spec':
            'f:containers':
              'k:{"name":"guestbook-ui"}':
                .: {}
                'f:env':
                  .: {}
                  'k:{"name":"SOME_ENV_VAR"}':
                    .: {}
                    'f:name': {}
                    'f:value': {}
                'f:image': {}
                'f:imagePullPolicy': {}
                'f:name': {}
                'f:ports':
                  .: {}
                  'k:{"containerPort":80,"protocol":"TCP"}':
                    .: {}
                    'f:containerPort': {}
                    'f:protocol': {}
                'f:resources':
                  .: {}
                  'f:requests':
                    .: {}
                    'f:cpu': {}
                    'f:memory': {}
                'f:terminationMessagePath': {}
                'f:terminationMessagePolicy': {}
            'f:dnsPolicy': {}
            'f:restartPolicy': {}
            'f:schedulerName': {}
            'f:securityContext': {}
            'f:terminationGracePeriodSeconds': {}
    manager: argocd
    operation: Update
    time: '2022-01-06T15:04:15Z'
  - apiVersion: apps/v1
    fieldsType: FieldsV1
    fieldsV1:
      'f:metadata':
        'f:annotations':
          'f:deployment.kubernetes.io/revision': {}
      'f:status':
        'f:availableReplicas': {}
        'f:conditions':
          .: {}
          'k:{"type":"Available"}':
            .: {}
            'f:lastTransitionTime': {}
            'f:lastUpdateTime': {}
            'f:message': {}
            'f:reason': {}
            'f:status': {}
            'f:type': {}
          'k:{"type":"Progressing"}':
            .: {}
            'f:lastTransitionTime': {}
            'f:lastUpdateTime': {}
            'f:message': {}
            'f:reason': {}
            'f:status': {}
            'f:type': {}
        'f:observedGeneration': {}
        'f:readyReplicas': {}
        'f:replicas': {}
        'f:updatedReplicas': {}
    manager: kube-controller-manager
    operation: Update
    time: '2022-01-06T18:15:14Z'
  name: kustomize-guestbook-ui
  namespace: default
  resourceVersion: '8289211'
  uid: ef253575-ce44-4c5e-84ad-16e81d0df6eb
spec:
  progressDeadlineSeconds: 600
  replicas: 4
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: guestbook-ui
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: guestbook-ui
    spec:
      containers:
      - env:
        - name: SOME_ENV_VAR
          value: some_value
        image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
        imagePullPolicy: IfNotPresent
        name: guestbook-ui
        ports:
        - containerPort: 80
          protocol: TCP
        resources:
          requests:
            cpu: 50m
            memory: 100Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 4
  conditions:
  - lastTransitionTime: '2022-01-05T22:20:37Z'
    lastUpdateTime: '2022-01-05T22:43:47Z'
    message: >-
      ReplicaSet "kustomize-guestbook-ui-6549d54677" has successfully
      progressed.
    reason: NewReplicaSetAvailable
    status: 'True'
    type: Progressing
  - lastTransitionTime: '2022-01-06T18:15:14Z'
    lastUpdateTime: '2022-01-06T18:15:14Z'
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: 'True'
    type: Available
  observedGeneration: 119
  readyReplicas: 4
  replicas: 4
  updatedReplicas: 4
14
controller/testdata/live-httpproxy.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: my-http-proxy
  namespace: default
spec:
  routes:
  - rateLimitPolicy:
      global:
        descriptors:
        - entries:
          - requestHeader:
              descriptorKey: sample-key
              headerName: sample-header
21
controller/testdata/minimal-image-replicas-deployment.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: client
  name: client
spec:
  replicas: 1
  selector:
    matchLabels:
      app: client
  strategy: {}
  template:
    metadata:
      labels:
        app: client
    spec:
      containers:
      - image: alpine:3
        name: alpine
        resources: {}
35
controller/testdata/target-deployment-env-vars.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui'
    iksm-version: '1.0'
  name: kustomize-guestbook-ui
  namespace: default
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: guestbook-ui
  template:
    metadata:
      labels:
        app: guestbook-ui
    spec:
      containers:
      - env:
        - name: SOME_OTHER_ENV_VAR
          value: some_other_value
        - name: YET_ANOTHER_ENV_VAR
          value: yet_another_value
        - name: SOME_ENV_VAR
          value: different_value!
        image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
        name: guestbook-ui
        ports:
        - containerPort: 80
        resources:
          requests:
            cpu: 50m
            memory: 100Mi
23
controller/testdata/target-httpproxy.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: my-http-proxy
  namespace: default
spec:
  routes:
  - rateLimitPolicy:
      global:
        descriptors:
        - entries:
          - requestHeaderValueMatch:
              headers:
              - contains: sample-key
                name: sample-header
                value: third
          - requestHeader:
              descriptorKey: sample-key
              headerName: sample-header
        - entries:
          - requestHeader:
              descriptorKey: sample-key
              headerName: sample-header
@@ -1,48 +1,83 @@
setTimeout(function() {
    const callbackName = 'callback_' + new Date().getTime();
    window[callbackName] = function (response) {
        const div = document.createElement('div');
        div.innerHTML = response.html;
        document.querySelector(".md-header__inner > .md-header__title").appendChild(div);
        const container = div.querySelector('.rst-versions');
        var caret = document.createElement('div');
        caret.innerHTML = "<i class='fa fa-caret-down dropdown-caret'></i>"
        caret.classList.add('dropdown-caret')
        div.querySelector('.rst-current-version').appendChild(caret);
const targetNode = document.querySelector('.md-header__inner');
const observerOptions = {
    childList: true,
    subtree: true
};

const observerCallback = function(mutationsList, observer) {
    for (let mutation of mutationsList) {
        if (mutation.type === 'childList') {
            const titleElement = document.querySelector('.md-header__inner > .md-header__title');
            if (titleElement) {
                initializeVersionDropdown();
                observer.disconnect();
            }
        }
    }
};

const observer = new MutationObserver(observerCallback);
observer.observe(targetNode, observerOptions);

function getCurrentVersion() {
    const currentVersion = window.location.href.match(/\/en\/(release-(?:v\d+|[\d\.]+|\w+)|latest|stable)\//);
    if (currentVersion && currentVersion.length > 1) {
        return currentVersion[1];
    }
    return null;
}

function initializeVersionDropdown() {
    const callbackName = 'callback_' + new Date().getTime();
    window[callbackName] = function(response) {
        const div = document.createElement('div');
        div.innerHTML = response.html;
        document.querySelector(".md-header__inner > .md-header__title").appendChild(div);
        const container = div.querySelector('.rst-versions');
        var caret = document.createElement('div');
        caret.innerHTML = "<i class='fa fa-caret-down dropdown-caret'></i>";
        caret.classList.add('dropdown-caret');
        div.querySelector('.rst-current-version').appendChild(caret);

        div.querySelector('.rst-current-version').addEventListener('click', function() {
            container.classList.toggle('shift-up');
        });
    };

    var CSSLink = document.createElement('link');
    CSSLink.rel='stylesheet';
    CSSLink.rel = 'stylesheet';
    CSSLink.href = '/assets/versions.css';
    document.getElementsByTagName('head')[0].appendChild(CSSLink);

    var script = document.createElement('script');
    script.src = 'https://argo-cd.readthedocs.io/_/api/v2/footer_html/?'+
        'callback=' + callbackName + '&project=argo-cd&page=&theme=mkdocs&format=jsonp&docroot=docs&source_suffix=.md&version=' + (window['READTHEDOCS_DATA'] || { version: 'latest' }).version;
    const currentVersion = getCurrentVersion();
    script.src = 'https://argo-cd.readthedocs.io/_/api/v2/footer_html/?' +
        'callback=' + callbackName + '&project=argo-cd&page=&theme=mkdocs&format=jsonp&docroot=docs&source_suffix=.md&version=' + (currentVersion || 'latest');
    document.getElementsByTagName('head')[0].appendChild(script);
}, 0);
}

// VERSION WARNINGS
window.addEventListener("DOMContentLoaded", function() {
    var rtdData = window['READTHEDOCS_DATA'] || { version: 'latest' };
    var margin = 30;
    var headerHeight = document.getElementsByClassName("md-header")[0].offsetHeight;
    if (rtdData.version === "latest") {
        document.querySelector("div[data-md-component=announce]").innerHTML = "<div id='announce-msg'>You are viewing the docs for an unreleased version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>"
        var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin
        document.querySelector("header.md-header").style.top = bannerHeight +"px";
        document.querySelector('style').textContent +=
            "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}"
        document.querySelector('style').textContent +=
            "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}"
    }
    else if (rtdData.version !== "stable") {
        document.querySelector("div[data-md-component=announce]").innerHTML = "<div id='announce-msg'>You are viewing the docs for a previous version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>"
        var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin
        document.querySelector("header.md-header").style.top = bannerHeight +"px";
        document.querySelector('style').textContent +=
            "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}"
        document.querySelector('style').textContent +=
            "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:"+ (bannerHeight+headerHeight)+"px !important; }}"
    var headerHeight = document.getElementsByClassName("md-header")[0].offsetHeight;
    const currentVersion = getCurrentVersion();
    if (currentVersion) {
        if (currentVersion === "latest") {
            document.querySelector("div[data-md-component=announce]").innerHTML = "<div id='announce-msg'>You are viewing the docs for an unreleased version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>";
            var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin;
            document.querySelector("header.md-header").style.top = bannerHeight + "px";
            document.querySelector('style').textContent +=
                "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
            document.querySelector('style').textContent +=
                "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
        } else if (currentVersion !== "stable") {
            document.querySelector("div[data-md-component=announce]").innerHTML = "<div id='announce-msg'>You are viewing the docs for a previous version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>";
            var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin;
            document.querySelector("header.md-header").style.top = bannerHeight + "px";
            document.querySelector('style').textContent +=
                "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
            document.querySelector('style').textContent +=
                "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
        }
    }
});

56
docs/faq.md
@@ -36,7 +36,7 @@ which might cause health check to return `Progressing` state instead of `Healthy
As a workaround, Argo CD allows providing [health check](operator-manual/health.md) customization which overrides default
behavior.

If you are using Traefik for your Ingress, you can update the Traefik config to publish the loadBalancer IP using [publishedservice](https://doc.traefik.io/traefik/providers/kubernetes-ingress/#publishedservice), which will resolve this issue.

```yaml
providers:
@@ -97,7 +97,7 @@ data:

## After deploying my Helm application with Argo CD I cannot see it with `helm ls` and other Helm commands

When deploying a Helm application Argo CD is using Helm
only as a template mechanism. It runs `helm template` and
then deploys the resulting manifests on the cluster instead of doing `helm install`. This means that you cannot use any Helm command
to view/verify the application. It is fully managed by Argo CD.
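To make that concrete, here is an illustrative Helm-sourced `Application` (a sketch; the name, repo URL, chart, and values are placeholders, not from this page). Argo CD renders the chart with `helm template` and applies the resulting manifests, so Helm itself never records a release:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-helm-app          # placeholder name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://example.com/charts   # placeholder chart repository
    chart: my-chart                       # placeholder chart name
    targetRevision: 1.2.3
    helm:
      values: |
        replicaCount: 2
  destination:
    server: https://kubernetes.default.svc
    namespace: default
```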
@@ -140,15 +140,15 @@ Argo CD automatically sets the `app.kubernetes.io/instance` label and uses it to
If the tool does this too, this causes confusion. You can change this label by setting
the `application.instanceLabelKey` value in the `argocd-cm`. We recommend that you use `argocd.argoproj.io/instance`.

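For illustration, a minimal `argocd-cm` applying the recommended key might look like this (a sketch; only the `data` key is taken from the paragraph above):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  application.instanceLabelKey: argocd.argoproj.io/instance
```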
!!! note
    When you make this change your applications will become out of sync and will need re-syncing.

See [#1482](https://github.com/argoproj/argo-cd/issues/1482).

## How often does Argo CD check for changes to my Git or Helm repository?

The default polling interval is 3 minutes (180 seconds).
You can change the setting by updating the `timeout.reconciliation` value in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.
The default polling interval is 3 minutes (180 seconds) with a configurable jitter.
You can change the setting by updating the `timeout.reconciliation` value and the `timeout.reconciliation.jitter` in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.

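As a sketch, the two settings sit side by side in `argocd-cm` (the values here are illustrative, not defaults):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  timeout.reconciliation: 300s        # poll every 5 minutes...
  timeout.reconciliation.jitter: 60s  # ...plus up to 1 minute of random jitter
```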
## Why Are My Resource Limits `Out Of Sync`?
@@ -250,7 +250,7 @@ There are two parts to the message:

> map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something]

You'll want to identify the keys that are duplicated -- you can focus on the first part, as each duplicated key will appear once for each of its values in the first list. The second list is really just

`]`

@@ -259,7 +259,7 @@ There are two parts to the message:
This includes all of the keys. It's included for debugging purposes -- you don't need to pay much attention to it. It will give you a hint about the precise location in the list for the duplicated keys:

> map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**]


`]`

In this case, the duplicated keys have been **emphasized** to help you identify the problematic keys. Many editors have the ability to highlight all instances of a string; using such an editor can help with such problems.
@@ -268,3 +268,45 @@ The most common instance of this error is with `env:` fields for `containers`.

!!! note "Dynamic applications"
    It's possible that your application is being generated by a tool in which case the duplication might not be evident within the scope of a single file. If you have trouble debugging this problem, consider filing a ticket to the owner of the generator tool asking them to improve its validation and error reporting.

## How to rotate Redis secret?
* Delete the `argocd-redis` secret in the namespace where Argo CD is installed.
```bash
kubectl delete secret argocd-redis -n <argocd namespace>
```
* If you are running Redis in HA mode, restart Redis in HA.
```bash
kubectl rollout restart deployment argocd-redis-ha-haproxy
kubectl rollout restart statefulset argocd-redis-ha-server
```
* If you are running Redis in non-HA mode, restart Redis.
```bash
kubectl rollout restart deployment argocd-redis
```
* Restart other components.
```bash
kubectl rollout restart deployment argocd-server argocd-repo-server
kubectl rollout restart statefulset argocd-application-controller
```

## How to turn off Redis auth if users really want to?

Argo CD default installation is now configured to automatically enable Redis authentication.
If for some reason authenticated Redis does not work for you and you want to use non-authenticated Redis, here are the steps:

* You need to have your own Redis installation.
* Configure Argo CD to use your own Redis instance. See this [doc](https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cmd-params-cm-yaml/) for the Argo CD configuration.
* If you already installed Redis shipped with Argo CD, you also need to clean up the existing components:
    * When HA Redis is used:
        * `kubectl delete deployment argocd-redis-ha-haproxy`
        * `kubectl delete statefulset argocd-redis-ha-server`
    * When non-HA Redis is used:
        * `kubectl delete deployment argocd-redis`
* Remove the environment variable `REDIS_PASSWORD` from the following manifests:
    * Deployment: argocd-repo-server
    * Deployment: argocd-server
    * StatefulSet: argocd-application-controller

## How do I provide my own Redis credentials?
The Redis password is stored in the Kubernetes secret `argocd-redis` with key `auth` in the namespace where Argo CD is installed.
You can configure your secret provider to generate the Kubernetes secret accordingly.
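For example, a secret with that shape could be provisioned like this (a sketch; the password value is a placeholder to be replaced by your secret provider):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: argocd-redis
  namespace: argocd
type: Opaque
stringData:
  auth: change-me   # placeholder password; supply your own
```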
@@ -38,6 +38,9 @@ Do one of:

Use `argocd login --core` to [configure](./user-guide/commands/argocd_login.md) CLI access and skip steps 3-5.

!!! note
    This default installation of Redis uses password authentication. The Redis password is stored in the Kubernetes secret `argocd-redis` with key `auth` in the namespace where Argo CD is installed.

## 2. Download Argo CD CLI

Download the latest Argo CD version from [https://github.com/argoproj/argo-cd/releases/latest](https://github.com/argoproj/argo-cd/releases/latest). More detailed installation instructions can be found via the [CLI installation documentation](cli_installation.md).

@@ -326,7 +326,7 @@ As with other generators, clusters *must* already be defined within Argo CD, in

In addition to the flattened key/value pairs from the configuration file, the following generator parameters are provided:

- `{{.path.path}}`: The path to the directory containing the matching configuration file within the Git repository. Example: `/clusters/clusterA`, if the config file was `/clusters/clusterA/config.json`
- `{{index .path.segments n}}`: The path to the matching configuration file within the Git repository, split into array elements (`n` - array index). Example: `index .path.segments 0: clusters`, `index .path.segments 1: clusterA`
- `{{.path.basename}}`: Basename of the path to the directory containing the configuration file (e.g. `clusterA`, with the above example.)
- `{{.path.basenameNormalized}}`: This field is the same as `.path.basename` with unsupported characters replaced with `-` (e.g. a `path` of `/directory/directory_2`, and `.path.basename` of `directory_2` would produce `directory-2` here).
- `{{.path.filename}}`: The matched filename. e.g., `config.json` in the above example.

@@ -360,7 +360,7 @@ spec:

      files:
      - path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
      values:
        base_dir: "{{index .path.segments 0}}/{{index .path.segments 1}}/{{index .path.segments 2}}"
  template:
    metadata:
      name: '{{.cluster.name}}-guestbook'

@@ -111,16 +111,15 @@ In this example, the ApplicationSet controller will generate an `Application` re

## Template Patch

Templating is only available on string type. However, some use cases may require applying templating on other types.

Example:

- Conditionally set the automated sync policy.
- Conditionally switch prune boolean to `true`.
- Add multiple helm value files from a list.

The `templatePatch` feature enables advanced templating, with support for `json` and `yaml`.

```yaml
apiVersion: argoproj.io/v1alpha1
# ...
```

@@ -174,3 +173,6 @@ spec:

The `spec.project` field is not supported in `templatePatch`. If you need to change the project, you can use the `spec.project` field in the `template` field.

!!! important
    When writing a `templatePatch`, you're crafting a patch. So, if the patch includes an empty `spec: # nothing in here`, it will effectively clear out existing fields. See [#17040](https://github.com/argoproj/argo-cd/issues/17040) for an example of this behavior.

@@ -316,6 +316,12 @@ data:

  # published to the repository. Reconciliation by timeout is disabled if timeout is set to 0. Three minutes by default.
  # > Note: argocd-repo-server deployment must be manually restarted after changing the setting.
  timeout.reconciliation: 180s
  # With a large number of applications, the periodic refresh for each application can cause a spike in the refresh queue
  # and can cause a spike in the repo-server component. To avoid this, you can set a jitter to the sync timeout, which will
  # spread out the refreshes and give time to the repo-server to catch up. The jitter is the maximum duration that can be
  # added to the sync timeout. So, if the sync timeout is 3 minutes and the jitter is 1 minute, then the actual timeout will
  # be between 3 and 4 minutes. Disabled when the value is 0, defaults to 0.
  timeout.reconciliation.jitter: 0

  # cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
  cluster.inClusterEnabled: "true"

@@ -406,3 +412,5 @@ data:

    cluster:
      name: some-cluster
      server: https://some-cluster
  # The maximum size of the payload that can be sent to the webhook server.
  webhook.maxPayloadSizeMB: 1024

@@ -90,6 +90,9 @@ data:

  server.k8sclient.retry.max: "0"
  # The initial backoff delay on the first retry attempt in ms. Subsequent retries will double this backoff time up to a maximum threshold
  server.k8sclient.retry.base.backoff: "100"
  # Semicolon-separated list of content types allowed on non-GET requests. Set an empty string to allow all. Be aware
  # that allowing content types besides application/json may make your API more vulnerable to CSRF attacks.
  server.api.content.types: "application/json"

  # Set the logging format. One of: text|json (default "text")
  server.log.format: "text"

@@ -17,16 +17,10 @@ which does not require a restart of the application controller pods.

## Enabling Dynamic Distribution of Clusters

This feature is disabled by default while it is in alpha. In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. Also, you must set the environment variable `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to `true` when running the Application Controller as a Deployment (a sketch of both steps follows).
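For example, a minimal sketch of both steps, assuming a local checkout of the Argo CD repository and the `argocd` namespace (both illustrative):

```bash
# Deploy the controller as a Deployment via the Kustomize overlay
kubectl apply -k manifests/ha/base/controller-deployment -n argocd

# Opt in to dynamic cluster distribution on the controller Deployment
kubectl set env deployment/argocd-application-controller \
  ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION=true -n argocd
```
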
!!! important
    The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes to avoid issues.

Note the introduction of the new environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME`. The environment variable is explained in [working of Dynamic Distribution Heartbeat Process](#working-of-dynamic-distribution).

@@ -57,7 +57,7 @@ performance. For performance reasons the controller monitors and caches only the

preferred version into a version of the resource stored in Git. If `kubectl convert` fails because the conversion is not supported then the controller falls back to a Kubernetes API query, which slows down reconciliation. In this case, we advise to use the preferred resource version in Git.

* The controller polls Git every 3m by default. You can change this duration using the `timeout.reconciliation` and `timeout.reconciliation.jitter` settings in the `argocd-cm` ConfigMap. The value of the fields is a duration string, e.g. `60s`, `1m`, `1h` or `1d` (a minimal example follows).
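A minimal `argocd-cm` sketch (the durations are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  # Poll Git every 5 minutes instead of the default 3
  timeout.reconciliation: 300s
  # Spread refreshes out by up to 1 minute
  timeout.reconciliation.jitter: 60s
```
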
* If the controller is managing too many clusters and uses too much memory then you can shard clusters across multiple controller replicas. To enable sharding, increase the number of replicas in `argocd-application-controller` `StatefulSet`
@@ -244,30 +244,41 @@ spec:

  # ...
```

### Application Sync Timeout & Jitter

Argo CD has a timeout for application syncs. It will trigger a refresh for each application periodically when the timeout expires.
With a large number of applications, this will cause a spike in the refresh queue and can cause a spike on the repo-server component. To avoid this, you can set a jitter to the sync timeout which will spread out the refreshes and give the repo-server time to catch up.

The jitter is the maximum duration that can be added to the sync timeout, so if the sync timeout is 5 minutes and the jitter is 1 minute, then the actual timeout will be between 5 and 6 minutes.

To configure the jitter you can set the following environment variable:

* `ARGOCD_RECONCILIATION_JITTER` - The jitter to apply to the sync timeout. Disabled when value is 0. Defaults to 0.

## Rate Limiting Application Reconciliations

To prevent high controller resource usage or sync loops caused either by misbehaving apps or other environment-specific factors, we can configure rate limits on the workqueues used by the application controller. There are two types of rate limits that can be configured:

* Global rate limits
* Per item rate limits

The final rate limiter uses a combination of both and calculates the final backoff as `max(globalBackoff, perItemBackoff)`.

### Global rate limits

This is disabled by default. It is a simple bucket based rate limiter that limits the number of items that can be queued per second.
This is useful to prevent a large number of apps from being queued at the same time.

To configure the bucket limiter you can set the following environment variables (a sketch follows the list):

* `WORKQUEUE_BUCKET_SIZE` - The number of items that can be queued in a single burst. Defaults to 500.
* `WORKQUEUE_BUCKET_QPS` - The number of items that can be queued per second. Defaults to MaxFloat64, which disables the limiter.
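For example, a sketch that enables the bucket limiter on the controller (values and namespace are illustrative):

```bash
kubectl set env statefulset/argocd-application-controller \
  WORKQUEUE_BUCKET_SIZE=500 WORKQUEUE_BUCKET_QPS=50 -n argocd
```
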
### Per item rate limits

This by default returns a fixed base delay/backoff value but can be configured to return exponential values.
The per item rate limiter limits the number of times a particular item can be queued. This is based on exponential backoff, where the backoff time for an item keeps increasing exponentially if it is queued multiple times in a short period, but the backoff is reset automatically if a configured `cool down` period has elapsed since the last time the item was queued.

To configure the per item limiter you can set the following environment variables:
@@ -277,16 +288,16 @@ To configure the per item limiter you can set the following environment variable

* `WORKQUEUE_MAX_DELAY_NS` : The max delay in nanoseconds, this is the max backoff limit. Defaults to 3 * 10^9 (=3s)
* `WORKQUEUE_BACKOFF_FACTOR` : The backoff factor, this is the factor by which the backoff is increased for each retry. Defaults to 1.5

The formula used to calculate the backoff time for an item, where `numRequeue` is the number of times the item has been queued and `lastRequeueTime` is the time at which the item was last queued:

- When `WORKQUEUE_FAILURE_COOLDOWN_NS` != 0 :

```
backoff = time.Since(lastRequeueTime) >= WORKQUEUE_FAILURE_COOLDOWN_NS ?
            WORKQUEUE_BASE_DELAY_NS :
            min(
                WORKQUEUE_MAX_DELAY_NS,
                WORKQUEUE_BASE_DELAY_NS * WORKQUEUE_BACKOFF_FACTOR ^ (numRequeue)
            )
```
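As a worked example, assume the item is on its third requeue within the cooldown window, with a base delay of 1ms and the defaults above:

```
backoff = min(3s, 1ms * 1.5^3) = min(3s, 3.375ms) = 3.375ms
```
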
@@ -67,7 +67,7 @@ metadata:

  name: argocd-cmd-params-cm
data:
  application.namespaces: app-team-one, app-team-two
  notificationscontroller.selfservice.enabled: "true"
```

To use this feature, you can deploy a configmap named `argocd-notifications-cm` and possibly a secret `argocd-notifications-secret` in the namespace where the Argo CD application lives.

@@ -12,8 +12,8 @@ To be able to send notifications with argocd-notifications you have to create an

8. Give your integration a name, copy the "API key" and save it somewhere for later
9. Make sure the checkboxes for "Create and Update Access" and "enable" are selected, disable the other checkboxes to remove unnecessary permissions
10. Click "Save Integration" at the bottom
11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the US/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API).
12. You are finished with configuring Opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the Opsgenie integration in the `argocd-notifications-secret` secret.

```yaml
apiVersion: v1
# ...
```

@@ -1,17 +1,17 @@

# PagerDuty

## Parameters

The PagerDuty notification service is used to create PagerDuty incidents and requires specifying the following settings:

* `pagerdutyToken` - the PagerDuty auth token
* `from` - email address of a valid user associated with the account making the request.
* `serviceID` - The ID of the resource.

## Example

The following snippet contains a sample PagerDuty service configuration:

```yaml
apiVersion: v1
# ...
```

@@ -35,7 +35,7 @@ data:

## Template

[Notification templates](../templates.md) support specifying the subject for PagerDuty notifications:

```yaml
apiVersion: v1
# ...
```

@@ -62,5 +62,5 @@ apiVersion: argoproj.io/v1alpha1

kind: Rollout
metadata:
  annotations:
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "<serviceID for PagerDuty>"
```

@@ -74,5 +74,5 @@ apiVersion: argoproj.io/v1alpha1

kind: Rollout
metadata:
  annotations:
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "<serviceID for PagerDuty>"
```

@@ -15,70 +15,72 @@ argocd-application-controller [flags]

### Options

```
      --app-hard-resync int                                      Time period in seconds for application hard resync.
      --app-resync int                                           Time period in seconds for application resync. (default 180)
      --app-resync-jitter int                                    Maximum time period in seconds to add as a delay jitter for application resync.
      --app-state-cache-expiration duration                      Cache expiration for app state (default 1h0m0s)
      --application-namespaces strings                           List of additional namespaces that applications are allowed to be reconciled from
      --as string                                                Username to impersonate for the operation
      --as-group stringArray                                     Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
      --as-uid string                                            UID to impersonate for the operation
      --certificate-authority string                             Path to a cert file for the certificate authority
      --client-certificate string                                Path to a client certificate file for TLS
      --client-key string                                        Path to a client key file for TLS
      --cluster string                                           The name of the kubeconfig cluster to use
      --context string                                           The name of the kubeconfig context to use
      --default-cache-expiration duration                        Cache expiration default (default 24h0m0s)
      --disable-compression                                      If true, opt-out of response compression for all requests to the server
      --dynamic-cluster-distribution-enabled                     Enables dynamic cluster distribution.
      --gloglevel int                                            Set the glog logging level
  -h, --help                                                     help for argocd-application-controller
      --ignore-normalizer-jq-execution-timeout-seconds duration  Set ignore normalizer JQ execution timeout
      --insecure-skip-tls-verify                                 If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
      --kubeconfig string                                        Path to a kube config. Only required if out-of-cluster
      --kubectl-parallelism-limit int                            Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20)
      --logformat string                                         Set the logging format. One of: text|json (default "text")
      --loglevel string                                          Set the logging level. One of: debug|info|warn|error (default "info")
      --metrics-application-labels strings                       List of Application labels that will be added to the argocd_application_labels metric
      --metrics-cache-expiration duration                        Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
      --metrics-port int                                         Start metrics server on given port (default 8082)
  -n, --namespace string                                         If present, the namespace scope for this CLI request
      --operation-processors int                                 Number of application operation processors (default 10)
      --otlp-address string                                      OpenTelemetry collector address to send traces to
      --otlp-attrs strings                                       List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
      --otlp-headers stringToString                              List of OpenTelemetry collector extra headers sent with traces, headers are comma-separated key-value pairs(e.g. key1=value1,key2=value2) (default [])
      --otlp-insecure                                            OpenTelemetry collector insecure mode (default true)
      --password string                                          Password for basic authentication to the API server
      --persist-resource-health                                  Enables storing the managed resources health in the Application CRD (default true)
      --proxy-url string                                         If provided, this URL will be used to connect via proxy
      --redis string                                             Redis server hostname and port (e.g. argocd-redis:6379).
      --redis-ca-certificate string                              Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
      --redis-client-certificate string                          Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
      --redis-client-key string                                  Path to Redis client key (e.g. /etc/certs/redis/client.crt).
      --redis-compress string                                    Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
      --redis-insecure-skip-tls-verify                           Skip Redis server certificate validation.
      --redis-use-tls                                            Use TLS when connecting to Redis.
      --redisdb int                                              Redis database.
      --repo-error-grace-period-seconds int                      Grace period in seconds for ignoring consecutive errors while communicating with repo server. (default 180)
      --repo-server string                                       Repo server address. (default "argocd-repo-server:8081")
      --repo-server-plaintext                                    Disable TLS on connections to repo server
      --repo-server-strict-tls                                   Whether to use strict validation of the TLS cert presented by the repo server
      --repo-server-timeout-seconds int                          Repo server RPC call timeout seconds. (default 60)
      --request-timeout string                                   The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
      --self-heal-timeout-seconds int                            Specifies timeout between application self heal attempts (default 5)
      --sentinel stringArray                                     Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
      --sentinelmaster string                                    Redis sentinel master group name. (default "master")
      --server string                                            The address and port of the Kubernetes API server
      --server-side-diff-enabled                                 Feature flag to enable ServerSide diff. Default ("false")
      --sharding-method string                                   Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy")
      --status-processors int                                    Number of application status processors (default 20)
      --tls-server-name string                                   If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string                                             Bearer token for authentication to the API server
      --user string                                              The name of the kubeconfig user to use
      --username string                                          Username for basic authentication to the API server
      --wq-backoff-factor float                                  Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5 (default 1.5)
      --wq-basedelay-ns duration                                 Set Workqueue Per Item Rate Limiter Base Delay duration in nanoseconds, default 1000000 (1ms) (default 1ms)
      --wq-bucket-qps float                                      Set Workqueue Rate Limiter Bucket QPS, default set to MaxFloat64 which disables the bucket limiter (default 1.7976931348623157e+308)
      --wq-bucket-size int                                       Set Workqueue Rate Limiter Bucket Size, default 500 (default 500)
      --wq-cooldown-ns duration                                  Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)
      --wq-maxdelay-ns duration                                  Set Workqueue Per Item Rate Limiter Max Delay duration in nanoseconds, default 1000000000 (1s) (default 1s)
```

@@ -21,6 +21,7 @@ argocd-repo-server [flags]

      --disable-helm-manifest-max-extracted-size   Disable maximum size of helm manifest archives when extracted
      --disable-tls                                 Disable TLS on the gRPC endpoint
      --helm-manifest-max-extracted-size string     Maximum size of helm manifest archives when extracted (default "1G")
      --helm-registry-max-index-size string         Maximum size of registry index file (default "1G")
  -h, --help                                        help for argocd-repo-server
      --logformat string                            Set the logging format. One of: text|json (default "text")
      --loglevel string                             Set the logging level. One of: debug|info|warn|error (default "info")

@@ -26,6 +26,7 @@ argocd-server [flags]

```
      --address string                           Listen on given address (default "0.0.0.0")
      --api-content-types string                 Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty. (default "application/json")
      --app-state-cache-expiration duration      Cache expiration for app state (default 1h0m0s)
      --application-namespaces strings           List of additional namespaces where application resources can be managed in
      --as string                                Username to impersonate for the operation
```

@@ -1,6 +1,5 @@

| Argo CD version | Kubernetes versions        |
|-----------------|----------------------------|
| 2.10            | v1.28, v1.27, v1.26, v1.25 |
| 2.9             | v1.28, v1.27, v1.26, v1.25 |
| 2.8             | v1.27, v1.26, v1.25, v1.24 |

@@ -3,3 +3,56 @@

## Upgraded Kustomize Version

Note that the bundled Kustomize version has been upgraded from 5.1.0 to 5.2.1.

## Egress NetworkPolicy for `argocd-redis` and `argocd-redis-ha-haproxy`

Starting with Argo CD 2.9.16, the NetworkPolicy for the `argocd-redis` and `argocd-redis-ha-haproxy` dropped Egress restrictions. This change was made to allow access to the Kubernetes API to create a secret to secure Redis access.

To retain similar networking restrictions as before 2.9.16, you can add an Egress rule to allow access only to the Kubernetes API and access needed by Redis itself. The Egress rule for Kubernetes access will depend entirely on your Kubernetes setup. The access for Redis itself can be allowed by adding the following to the `argocd-redis-network-policy` NetworkPolicy:

```diff
 kind: NetworkPolicy
 apiVersion: networking.k8s.io/v1
 metadata:
   name: argocd-redis-network-policy
 spec:
   policyTypes:
   - Ingress
+  - Egress
+  egress:
+  - ports:
+    - port: 53
+      protocol: UDP
+    - port: 53
+      protocol: TCP
```

```diff
 kind: NetworkPolicy
 apiVersion: networking.k8s.io/v1
 metadata:
   name: argocd-redis-ha-haproxy
 spec:
   policyTypes:
   - Ingress
+  - Egress
+  egress:
+  - ports:
+    - port: 6379
+      protocol: TCP
+    - port: 26379
+      protocol: TCP
+    to:
+    - podSelector:
+        matchLabels:
+          app.kubernetes.io/name: argocd-redis-ha
+  - ports:
+    - port: 53
+      protocol: UDP
+    - port: 53
+      protocol: TCP
```

@@ -10,3 +10,69 @@ removed.

To avoid unexpected behavior, follow the [client-side to server-side resource upgrade guide](https://kubernetes.io/docs/reference/using-api/server-side-apply/#upgrading-from-client-side-apply-to-server-side-apply) before enabling `managedNamespaceMetadata` on an existing namespace.

## Upgraded Helm Version

Note that the bundled Helm version has been upgraded from 3.13.2 to 3.14.3.

## Egress NetworkPolicy for `argocd-redis` and `argocd-redis-ha-haproxy`

Starting with Argo CD 2.10.11, the NetworkPolicy for the `argocd-redis` and `argocd-redis-ha-haproxy` dropped Egress restrictions. This change was made to allow access to the Kubernetes API to create a secret to secure Redis access.

To retain similar networking restrictions as before 2.10.11, you can add an Egress rule to allow access only to the Kubernetes API and access needed by Redis itself. The Egress rule for Kubernetes access will depend entirely on your Kubernetes setup. The access for Redis itself can be allowed by adding the following to the `argocd-redis-network-policy` NetworkPolicy:

```diff
 kind: NetworkPolicy
 apiVersion: networking.k8s.io/v1
 metadata:
   name: argocd-redis-network-policy
 spec:
   policyTypes:
   - Ingress
+  - Egress
+  egress:
+  - ports:
+    - port: 53
+      protocol: UDP
+    - port: 53
+      protocol: TCP
```

```diff
 kind: NetworkPolicy
 apiVersion: networking.k8s.io/v1
 metadata:
   name: argocd-redis-ha-haproxy
 spec:
   policyTypes:
   - Ingress
+  - Egress
+  egress:
+  - ports:
+    - port: 6379
+      protocol: TCP
+    - port: 26379
+      protocol: TCP
+    to:
+    - podSelector:
+        matchLabels:
+          app.kubernetes.io/name: argocd-redis-ha
+  - ports:
+    - port: 53
+      protocol: UDP
+    - port: 53
+      protocol: TCP
```

## Sanitized project API response

Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)), the project API response was sanitized to remove sensitive information. This includes credentials of project-scoped repositories and clusters.

> **Note:** The 2.10 series has been EOL for some time and has not received security updates. 2.10.18 was patched for the critical CVE-2025-55190 but was not patched for other vulnerabilities. It is important to upgrade to a supported version as quickly as possible.

@@ -19,6 +19,8 @@ URL configured in the Git provider should use the `/api/webhook` endpoint of you

(e.g. `https://argocd.example.com/api/webhook`). If you wish to use a shared secret, input an arbitrary value in the secret. This value will be used when configuring the webhook in the next step.

To prevent DDoS attacks with unauthenticated webhook events (the `/api/webhook` endpoint currently lacks rate limiting protection), it is recommended to limit the payload size. You can achieve this by configuring the `argocd-cm` ConfigMap with the `webhook.maxPayloadSizeMB` attribute. The default value is 1GB. A minimal example follows.
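A minimal `argocd-cm` sketch lowering the limit (the value is illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  webhook.maxPayloadSizeMB: "50"
```
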
## GitHub



@@ -139,6 +139,7 @@ $ argocd admin initial-password reset

* [argocd admin initial-password](argocd_admin_initial-password.md) - Prints initial password to log in to Argo CD for the first time
* [argocd admin notifications](argocd_admin_notifications.md) - Set of CLI commands that help manage notifications settings
* [argocd admin proj](argocd_admin_proj.md) - Manage projects configuration
* [argocd admin redis-initial-password](argocd_admin_redis-initial-password.md) - Ensure the Redis password exists, creating a new one if necessary.
* [argocd admin repo](argocd_admin_repo.md) - Manage repositories configuration
* [argocd admin settings](argocd_admin_settings.md) - Provides a set of commands for settings validation and troubleshooting

@@ -11,32 +11,33 @@ argocd admin app get-reconcile-results PATH [flags]

### Options

```
      --as string                                         Username to impersonate for the operation
      --as-group stringArray                              Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
      --as-uid string                                     UID to impersonate for the operation
      --certificate-authority string                      Path to a cert file for the certificate authority
      --client-certificate string                         Path to a client certificate file for TLS
      --client-key string                                 Path to a client key file for TLS
      --cluster string                                    The name of the kubeconfig cluster to use
      --context string                                    The name of the kubeconfig context to use
      --disable-compression                               If true, opt-out of response compression for all requests to the server
  -h, --help                                              help for get-reconcile-results
      --ignore-normalizer-jq-execution-timeout duration   Set ignore normalizer JQ execution timeout (default 1s)
      --insecure-skip-tls-verify                          If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
      --kubeconfig string                                 Path to a kube config. Only required if out-of-cluster
      --l string                                          Label selector
  -n, --namespace string                                  If present, the namespace scope for this CLI request
      --o string                                          Output format (yaml|json) (default "yaml")
      --password string                                   Password for basic authentication to the API server
      --proxy-url string                                  If provided, this URL will be used to connect via proxy
      --refresh                                           If set to true then recalculates apps reconciliation
      --repo-server string                                Repo server address.
      --request-timeout string                            The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
      --server string                                     The address and port of the Kubernetes API server
      --server-side-diff                                  If set to "true" will use server-side diff while comparing resources. Default ("false")
      --tls-server-name string                            If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string                                      Bearer token for authentication to the API server
      --user string                                       The name of the kubeconfig user to use
      --username string                                   Username for basic authentication to the API server
```

### Options inherited from parent commands

@@ -62,6 +62,6 @@ argocd admin cluster namespaces my-cluster

* [argocd admin cluster generate-spec](argocd_admin_cluster_generate-spec.md) - Generate declarative config for a cluster
* [argocd admin cluster kubeconfig](argocd_admin_cluster_kubeconfig.md) - Generates kubeconfig for the specified cluster
* [argocd admin cluster namespaces](argocd_admin_cluster_namespaces.md) - Print information about namespaces which Argo CD manages in each cluster.
* [argocd admin cluster shards](argocd_admin_cluster_shards.md) - Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.
* [argocd admin cluster stats](argocd_admin_cluster_stats.md) - Prints information about cluster statistics and inferred shard number

@@ -2,7 +2,7 @@

## argocd admin cluster shards

Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.

```
argocd admin cluster shards [flags]
```

@@ -43,6 +43,7 @@ argocd admin cluster shards [flags]

      --sentinelmaster string    Redis sentinel master group name. (default "master")
      --server string            The address and port of the Kubernetes API server
      --shard int                Cluster shard filter (default -1)
      --sharding-method string   Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] (default "legacy")
      --tls-server-name string   If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string             Bearer token for authentication to the API server
      --user string              The name of the kubeconfig user to use

@@ -57,6 +57,7 @@ argocd admin cluster stats target-cluster

      --sentinelmaster string    Redis sentinel master group name. (default "master")
      --server string            The address and port of the Kubernetes API server
      --shard int                Cluster shard filter (default -1)
      --sharding-method string   Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] (default "legacy")
      --tls-server-name string   If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string             Bearer token for authentication to the API server
      --user string              The name of the kubeconfig user to use

@@ -0,0 +1,67 @@

# `argocd admin redis-initial-password` Command Reference

## argocd admin redis-initial-password

Ensure the Redis password exists, creating a new one if necessary.

```
argocd admin redis-initial-password [flags]
```
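For example (the namespace is illustrative):

```bash
argocd admin redis-initial-password -n argocd
```
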
### Options

```
      --as string                      Username to impersonate for the operation
      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
      --as-uid string                  UID to impersonate for the operation
      --certificate-authority string   Path to a cert file for the certificate authority
      --client-certificate string      Path to a client certificate file for TLS
      --client-key string              Path to a client key file for TLS
      --cluster string                 The name of the kubeconfig cluster to use
      --context string                 The name of the kubeconfig context to use
      --disable-compression            If true, opt-out of response compression for all requests to the server
  -h, --help                           help for redis-initial-password
      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
      --kubeconfig string              Path to a kube config. Only required if out-of-cluster
  -n, --namespace string               If present, the namespace scope for this CLI request
      --password string                Password for basic authentication to the API server
      --proxy-url string               If provided, this URL will be used to connect via proxy
      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
      --server string                  The address and port of the Kubernetes API server
      --tls-server-name string         If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string                   Bearer token for authentication to the API server
      --user string                    The name of the kubeconfig user to use
      --username string                Username for basic authentication to the API server
```

### Options inherited from parent commands

```
      --auth-token string               Authentication token
      --client-crt string               Client certificate file
      --client-crt-key string           Client certificate key file
      --config string                   Path to Argo CD config (default "/home/user/.config/argocd/config")
      --controller-name string          Name of the Argo CD Application controller; set this or the ARGOCD_APPLICATION_CONTROLLER_NAME environment variable when the controller's name label differs from the default, for example when installing via the Helm chart (default "argocd-application-controller")
      --core                            If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server
      --grpc-web                        Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.
      --grpc-web-root-path string       Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.
  -H, --header strings                  Sets additional header to all requests made by Argo CD CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers)
      --http-retry-max int              Maximum number of retries to establish http connection to Argo CD server
      --insecure                        Skip server certificate and domain verification
      --kube-context string             Directs the command to the given kube-context
      --logformat string                Set the logging format. One of: text|json (default "text")
      --loglevel string                 Set the logging level. One of: debug|info|warn|error (default "info")
      --plaintext                       Disable TLS
      --port-forward                    Connect to a random argocd-server port using port forwarding
      --port-forward-namespace string   Namespace name which should be used for port forwarding
      --redis-haproxy-name string       Name of the Redis HA Proxy; set this or the ARGOCD_REDIS_HAPROXY_NAME environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart (default "argocd-redis-ha-haproxy")
      --redis-name string               Name of the Redis deployment; set this or the ARGOCD_REDIS_NAME environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart (default "argocd-redis")
      --repo-server-name string         Name of the Argo CD Repo server; set this or the ARGOCD_REPO_SERVER_NAME environment variable when the server's name label differs from the default, for example when installing via the Helm chart (default "argocd-repo-server")
      --server-crt string               Server certificate file
      --server-name string              Name of the Argo CD API server; set this or the ARGOCD_SERVER_NAME environment variable when the server's name label differs from the default, for example when installing via the Helm chart (default "argocd-server")
```

### SEE ALSO

* [argocd admin](argocd_admin.md) - Contains a set of commands useful for Argo CD administrators and requires direct Kubernetes access

@@ -22,7 +22,8 @@ argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml -

### Options

```
  -h, --help                                              help for ignore-resource-updates
      --ignore-normalizer-jq-execution-timeout duration   Set ignore normalizer JQ execution timeout (default 1s)
```

### Options inherited from parent commands

@@ -17,15 +17,16 @@ argocd app diff APPNAME [flags]

### Options

```
      --exit-code                                         Return non-zero exit code when there is a diff (default true)
      --hard-refresh                                      Refresh application data as well as target manifests cache
  -h, --help                                              help for diff
      --ignore-normalizer-jq-execution-timeout duration   Set ignore normalizer JQ execution timeout (default 1s)
      --local string                                      Compare live app to a local manifests
      --local-include stringArray                         Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path. (default [*.yaml,*.yml,*.json])
      --local-repo-root string                            Path to the repository root. Used together with --local allows setting the repository root (default "/")
      --refresh                                           Refresh application data when retrieving
      --revision string                                   Compare live app to a particular revision
      --server-side-generate                              Used with --local, this will send your manifests to the server for diffing
```

### Options inherited from parent commands

@@ -38,31 +38,32 @@ argocd app sync [APPNAME... | -l selector | --project project-name] [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--apply-out-of-sync-only Sync only out-of-sync resources
|
||||
--assumeYes Assume yes as answer for all user queries or prompts
|
||||
--async Do not wait for application to sync before continuing
|
||||
--dry-run Preview apply without affecting cluster
|
||||
--force Use a force apply
|
||||
-h, --help help for sync
|
||||
--info stringArray A list of key-value pairs during sync process. These infos will be persisted in app.
|
||||
--label stringArray Sync only specific resources with a label. This option may be specified repeatedly.
|
||||
--local string Path to a local directory. When this flag is present no git queries will be made
|
||||
--local-repo-root string Path to the repository root. Used together with --local allows setting the repository root (default "/")
|
||||
-o, --output string Output format. One of: json|yaml|wide|tree|tree=detailed (default "wide")
|
||||
--preview-changes Preview difference against the target and live state before syncing app and wait for user confirmation
|
||||
--project stringArray Sync apps that belong to the specified projects. This option may be specified repeatedly.
|
||||
--prune Allow deleting unexpected resources
|
||||
--replace Use a kubectl create/replace instead apply
|
||||
--resource stringArray Sync only specific resources as GROUP:KIND:NAME or !GROUP:KIND:NAME. Fields may be blank and '*' can be used. This option may be specified repeatedly
|
||||
--retry-backoff-duration duration Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h) (default 5s)
|
||||
--retry-backoff-factor int Factor multiplies the base duration after each failed retry (default 2)
|
||||
--retry-backoff-max-duration duration Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h) (default 3m0s)
|
||||
--retry-limit int Max number of allowed sync retries
|
||||
--revision string Sync to a specific revision. Preserves parameter overrides
|
||||
-l, --selector string Sync apps that match this label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.
|
||||
--server-side Use server-side apply while syncing the application
|
||||
--strategy string Sync strategy (one of: apply|hook)
|
||||
--timeout uint Time out after this many seconds
|
||||
      --apply-out-of-sync-only                            Sync only out-of-sync resources
      --assumeYes                                         Assume yes as answer for all user queries or prompts
      --async                                             Do not wait for application to sync before continuing
      --dry-run                                           Preview apply without affecting cluster
      --force                                             Use a force apply
  -h, --help                                              help for sync
      --ignore-normalizer-jq-execution-timeout duration   Set ignore normalizer JQ execution timeout (default 1s)
      --info stringArray                                  A list of key-value pairs recorded during the sync process. These will be persisted in the app.
      --label stringArray                                 Sync only specific resources with a label. This option may be specified repeatedly.
      --local string                                      Path to a local directory. When this flag is present no git queries will be made
      --local-repo-root string                            Path to the repository root. Used together with --local allows setting the repository root (default "/")
  -o, --output string                                     Output format. One of: json|yaml|wide|tree|tree=detailed (default "wide")
      --preview-changes                                   Preview difference against the target and live state before syncing app and wait for user confirmation
      --project stringArray                               Sync apps that belong to the specified projects. This option may be specified repeatedly.
      --prune                                             Allow deleting unexpected resources
      --replace                                           Use a kubectl create/replace instead of apply
      --resource stringArray                              Sync only specific resources as GROUP:KIND:NAME or !GROUP:KIND:NAME. Fields may be blank and '*' can be used. This option may be specified repeatedly
      --retry-backoff-duration duration                   Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h) (default 5s)
      --retry-backoff-factor int                          Factor multiplies the base duration after each failed retry (default 2)
      --retry-backoff-max-duration duration               Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h) (default 3m0s)
      --retry-limit int                                   Max number of allowed sync retries
      --revision string                                   Sync to a specific revision. Preserves parameter overrides
  -l, --selector string                                   Sync apps that match this label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.
      --server-side                                       Use server-side apply while syncing the application
      --strategy string                                   Sync strategy (one of: apply|hook)
      --timeout uint                                      Time out after this many seconds
```
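
For example, a typical invocation combining several of these flags might look like this (the application name `guestbook` is illustrative):

```shell
# Preview the diff and ask for confirmation, then sync only out-of-sync
# resources, pruning anything no longer in Git, with up to 3 retries.
argocd app sync guestbook --preview-changes --apply-out-of-sync-only --prune --retry-limit 3
```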

### Options inherited from parent commands

@@ -56,7 +56,13 @@ Application.

Add the following entry in the argocd-cmd-params-cm configmap:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
data:
  controller.diff.server.side: "true"
...
```

Note: It is necessary to restart the `argocd-application-controller` after changing this setting for it to take effect.

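In a default installation the controller runs as a StatefulSet in the `argocd` namespace, so the restart could be performed like this (namespace and workload kind are assumptions about your installation):

```shell
kubectl -n argocd rollout restart statefulset argocd-application-controller
```
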
@@ -185,3 +185,16 @@ The list of supported Kubernetes types is available in [diffing_known_types.txt]

* `core/Quantity`
* `meta/v1/duration`

### JQ Path expression timeout

By default, the evaluation of a JQPathExpression is limited to one second. If you encounter a "JQ patch execution timed out" error message due to a complex JQPathExpression that requires more time to evaluate, you can extend the timeout period by configuring the `ignore.normalizer.jq.timeout` setting within the `argocd-cmd-params-cm` ConfigMap.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
data:
  ignore.normalizer.jq.timeout: "5s"
```

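For context, this timeout bounds the evaluation of each `jqPathExpressions` entry in an Application's `ignoreDifferences` configuration; a minimal sketch (group, kind, and expression are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
spec:
  ignoreDifferences:
    - group: apps
      kind: Deployment
      jqPathExpressions:
        - .spec.template.spec.containers[] | select(.name == "guestbook").image
```
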
@@ -106,6 +106,37 @@ spec:
  namespace: default
```

## Components

Kustomize [components](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/components.md) encapsulate both resources and patches together. They provide a powerful way to modularize and reuse configuration in Kubernetes applications.

Outside of Argo CD, to utilize components, you must add the following to the `kustomization.yaml` that the Application references. For example:

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
...
components:
  - ../component
```

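The referenced component directory carries its own `kustomization.yaml` declared with the `Component` kind; a minimal sketch (the patch file name is illustrative):

```yaml
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component
patches:
  - path: patch.yaml
```
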
With support added for components in `v2.10.0`, you can now reference a component directly in the Application:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: application-kustomize-components
spec:
  ...
  source:
    path: examples/application-kustomize-components/base
    repoURL: https://github.com/my-user/my-repo
    targetRevision: main

    # This!
    kustomize:
      components:
        - ../component # relative to the kustomization.yaml (`source.path`).
```

## Private Remote Bases

If you have remote bases that are either (a) HTTPS and need a username/password, or (b) SSH and need an SSH private key, then they'll inherit those credentials from the app's repo.

@@ -8,9 +8,9 @@ and after a Sync operation. Hooks can also be run if a Sync operation fails at a

* Using a `Sync` hook to orchestrate a complex deployment requiring more sophistication than the
  Kubernetes rolling update strategy.
* Using a `PostSync` hook to run integration and health checks after a deployment.
* Using a `SyncFail` hook to run clean-up or finalizer logic if a Sync operation fails.
* Using a `PostDelete` hook to run clean-up or finalizer logic after all Application resources are deleted. Please note that
  `PostDelete` hooks are only deleted if the delete policy matches the aggregated deletion hooks status and not garbage collected after the application is deleted.

## Usage

@@ -39,7 +39,8 @@ The following hooks are defined:

| `Sync` | Executes after all `PreSync` hooks completed and were successful, at the same time as the application of the manifests. |
| `Skip` | Indicates to Argo CD to skip the application of the manifest. |
| `PostSync` | Executes after all `Sync` hooks completed and were successful, a successful application, and all resources in a `Healthy` state. |
| `SyncFail` | Executes when the sync operation fails. |
| `PostDelete` | Executes after all Application resources are deleted. _Available starting in v2.10._ |

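A hook is an ordinary manifest carrying the `argocd.argoproj.io/hook` annotation. As a sketch, a `PostDelete` clean-up Job could look like this (name, image, and command are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: cleanup
  annotations:
    argocd.argoproj.io/hook: PostDelete
    argocd.argoproj.io/hook-delete-policy: HookSucceeded
spec:
  template:
    spec:
      containers:
        - name: cleanup
          image: alpine:3.19
          command: ["sh", "-c", "echo cleaning up"]
      restartPolicy: Never
```
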
### Generate Name

@@ -165,6 +165,21 @@ metadata:
    argocd.argoproj.io/sync-options: Replace=true
```

## Force Sync

For certain resources you might want to delete and recreate them, e.g. Job resources that should run on every sync.

!!! warning
    During the sync process, the resources will be synchronized using the 'kubectl delete/create' command.
    This sync option has a destructive action, which could cause an outage for your application.

In such cases you might use the `Force=true` sync option in the target resources' annotation:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/sync-options: Force=true,Replace=true
```

## Server-Side Apply

This option enables Kubernetes [Server-Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/).

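Like the other sync options above, it can be enabled per resource via the same annotation pattern:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/sync-options: ServerSideApply=true
```
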
go.mod
@@ -13,10 +13,10 @@ require (
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d
github.com/alicebob/miniredis/v2 v2.30.4
github.com/antonmedv/expr v1.15.2
github.com/argoproj/gitops-engine v0.7.1-0.20231218194513-aba38192fb16
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9
github.com/argoproj/gitops-engine v0.7.1-0.20240715141017-b6ec82aedce5
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1
github.com/aws/aws-sdk-go v1.44.317
github.com/aws/aws-sdk-go v1.50.8
github.com/bmatcuk/doublestar/v4 v4.6.0
github.com/bombsimon/logrusr/v2 v2.0.1
github.com/bradleyfalzon/ghinstallation/v2 v2.6.0
@@ -28,7 +28,8 @@ require (
github.com/evanphx/json-patch v5.6.0+incompatible
github.com/fsnotify/fsnotify v1.6.0
github.com/gfleury/go-bitbucket-v1 v0.0.0-20220301131131-8e7ed04b843e
github.com/go-git/go-git/v5 v5.8.1
github.com/go-git/go-git/v5 v5.11.0
github.com/go-jose/go-jose/v3 v3.0.3
github.com/go-logr/logr v1.3.0
github.com/go-openapi/loads v0.21.2
github.com/go-openapi/runtime v0.26.0
@@ -38,7 +39,7 @@ require (
github.com/gogits/go-gogs-client v0.0.0-20200905025246-8bb8a50cb355
github.com/gogo/protobuf v1.3.2
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang/protobuf v1.5.3
github.com/golang/protobuf v1.5.4
github.com/google/go-cmp v0.6.0
github.com/google/go-github/v35 v35.3.0
github.com/google/go-jsonnet v0.20.0
@@ -50,14 +51,14 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/go-retryablehttp v0.7.4
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/imdario/mergo v0.3.16
github.com/improbable-eng/grpc-web v0.15.0
github.com/itchyny/gojq v0.12.13
github.com/jeremywohl/flatten v1.0.1
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/ktrysmt/go-bitbucket v0.9.67
github.com/mattn/go-isatty v0.0.19
github.com/mattn/go-isatty v0.0.20
github.com/mattn/go-zglob v0.0.4
github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5
github.com/olekukonko/tablewriter v0.0.5
@@ -76,23 +77,22 @@ require (
github.com/whilp/git-urls v1.0.0
github.com/xanzy/go-gitlab v0.91.1
github.com/yuin/gopher-lua v1.1.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
golang.org/x/crypto v0.14.0
golang.org/x/crypto v0.19.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/oauth2 v0.11.0
golang.org/x/sync v0.3.0
golang.org/x/term v0.13.0
golang.org/x/term v0.17.0
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.31.0
gopkg.in/square/go-jose.v2 v2.6.0
google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.26.11
k8s.io/apiextensions-apiserver v0.26.4
k8s.io/apiextensions-apiserver v0.26.10
k8s.io/apimachinery v0.26.11
k8s.io/apiserver v0.26.11
k8s.io/client-go v0.26.11
@@ -103,7 +103,7 @@ require (
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
layeh.com/gopher-json v0.0.0-20190114024228-97fed8db8427
oras.land/oras-go/v2 v2.3.0
sigs.k8s.io/controller-runtime v0.14.6
sigs.k8s.io/controller-runtime v0.14.7
sigs.k8s.io/structured-merge-diff/v4 v4.4.1
sigs.k8s.io/yaml v1.3.0
)
@@ -114,19 +114,20 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.2 // indirect
github.com/aws/aws-sdk-go-v2 v1.17.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.8 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.25.12 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
@@ -159,9 +160,8 @@ require (
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/PagerDuty/go-pagerduty v1.7.0 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -184,8 +184,7 @@ require (
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.4.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
@@ -243,7 +242,7 @@ require (
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
@@ -251,7 +250,7 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/skeema/knownhosts v1.2.0 // indirect
github.com/skeema/knownhosts v1.2.2 // indirect
github.com/slack-go/slack v0.12.2 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/stretchr/objx v0.5.0 // indirect
@@ -268,11 +267,11 @@ require (
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.17.0
golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/net v0.19.0
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0
golang.org/x/tools v0.12.0 // indirect
golang.org/x/tools v0.13.0 // indirect
gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
gomodules.xyz/notify v0.1.1 // indirect
@@ -296,9 +295,12 @@ replace (
// https://github.com/golang/go/issues/33546#issuecomment-519656923
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127

github.com/golang/protobuf => github.com/golang/protobuf v1.4.2
github.com/golang/protobuf => github.com/golang/protobuf v1.5.4
github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0

// Avoid CVE-2023-46402
github.com/whilp/git-urls => github.com/chainguard-dev/git-urls v1.0.2

// Avoid CVE-2022-3064
gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0

go.sum
@@ -657,8 +657,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/PagerDuty/go-pagerduty v1.7.0 h1:S1NcMKECxT5hJwV4VT+QzeSsSiv4oWl1s2821dUqG/8=
github.com/PagerDuty/go-pagerduty v1.7.0/go.mod h1:PuFyJKRz1liIAH4h5KVXVD18Obpp1ZXRdxHvmGXooro=
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 h1:prBTRx78AQnXzivNT9Crhu564W/zPPr3ibSlpT9xKcE=
@@ -668,8 +668,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d h1:WtAMR0fPCOfK7TPGZ8ZpLLY18HRvL7XJ3xcs0wnREgo=
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d/go.mod h1:WML6KOYjeU8N6YyusMjj2qRvaPNUEvrQvaxuFcMRFJY=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
@@ -696,10 +694,10 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
github.com/argoproj/gitops-engine v0.7.1-0.20231218194513-aba38192fb16 h1:kR15L8UsSVr7oitABKU88msQirMT0/RO/KRla1jkq/s=
github.com/argoproj/gitops-engine v0.7.1-0.20231218194513-aba38192fb16/go.mod h1:gWE8uROi7hIkWGNAVM+8FWkMfo0vZ03SLx/aFw/DBzg=
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9 h1:1lt0VXzmLK7Vv0kaeal3S6/JIfzPyBORkUWXhiqF3l0=
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9/go.mod h1:E/vv4+by868m0mmflaRfGBmKBtAupoF+mmyfekP8QCk=
github.com/argoproj/gitops-engine v0.7.1-0.20240715141017-b6ec82aedce5 h1:YF0xxjIYPeZfsKfZtTd7rxEWQ7EeiTBJHO3PmQ2kV3c=
github.com/argoproj/gitops-engine v0.7.1-0.20240715141017-b6ec82aedce5/go.mod h1:d4eLldeEFyZIcVySAMhXhnh1tTa4qfvPYfut9B8UClw=
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604 h1:pMfBao6Vm1Ax0xGIp9BWEia2nKkccHwV0dTEdrsFOpo=
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604/go.mod h1:TsyusmXQWIL0ST7YMRG/ered7WlWDmbmnPpXnS2LJmM=
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1 h1:qsHwwOJ21K2Ao0xPju1sNuqphyMnMYkyB3ZLoLtxWpo=
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1/go.mod h1:CZHlkyAD1/+FbEn6cB2DQTj48IoLGvEYsWEvtzP3238=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -715,35 +713,37 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU=
github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.50.8 h1:gY0WoOW+/Wz6XmYSgDH9ge3wnAevYDSQWPxxJvqAkP4=
github.com/aws/aws-sdk-go v1.50.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/config v1.18.8 h1:lDpy0WM8AHsywOnVrOHaSMfpaiV2igOw8D7svkFkXVA=
github.com/aws/aws-sdk-go-v2/config v1.18.8/go.mod h1:5XCmmyutmzzgkpk/6NYTjeWb6lgo9N170m1j6pQkIBs=
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 h1:vTrwTvv5qAwjWIGhZDSBH/oQHuIQjGmD232k01FUh6A=
github.com/aws/aws-sdk-go-v2/credentials v1.13.8/go.mod h1:lVa4OHbvgjVot4gmh1uouF1ubgexSCN92P6CJQpT0t8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k=
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 h1:tQoMg8i4nFAB70cJ4wiAYEiZRYo2P6uDmU2D6ys/igo=
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0/go.mod h1:jQhN5f4p3PALMNlUtfb/0wGIFlV7vGtJlPDVfxfNfPY=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 h1:/2gzjhQowRLarkkBOGPXSRnb8sQ2RVsjdG1C/UliK/c=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 h1:Jfly6mRxk2ZOSlbCvZfKNS7TukSx1mIzhSsqZ/IGSZI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0/go.mod h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 h1:kOO++CYo50RcTFISESluhWEi5Prhg+gaSs4whWabiZU=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/config v1.25.12 h1:mF4cMuNh/2G+d19nWnm1vJ/ak0qK6SbqF0KtSX9pxu0=
github.com/aws/aws-sdk-go-v2/config v1.25.12/go.mod h1:lOvvqtZP9p29GIjOTuA/76HiVk0c/s8qRcFRq2+E2uc=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 h1:tRNrFDGRm81e6nTX5Q4CFblea99eAfm0dxXazGpLceU=
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7/go.mod h1:8GWUDux5Z2h6z2efAtr54RdHXtLm8sq7Rg85ZNY/CZM=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -786,6 +786,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
github.com/chainguard-dev/git-urls v1.0.2 h1:pSpT7ifrpc5X55n4aTTm7FFUE+ZQHKiqpiwNkJrVcKQ=
github.com/chainguard-dev/git-urls v1.0.2/go.mod h1:rbGgj10OS7UgZlbzdUQIQpT0k/D4+An04HJY7Ol+Y/o=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -850,8 +852,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0=
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
@@ -889,6 +891,8 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+ne
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@@ -925,17 +929,17 @@ github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A=
github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@@ -1088,8 +1092,8 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -1237,14 +1241,14 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-retryablehttp v0.5.1/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
@@ -1377,17 +1381,17 @@ github.com/malexdev/utfutil v0.0.0-20180510171754-00c8d4a8e7a8 h1:A6SLdFpRzUUF5v
github.com/malexdev/utfutil v0.0.0-20180510171754-00c8d4a8e7a8/go.mod h1:UtpLyb/EupVKXF/N0b4NRe1DNg+QYJsnsHQ038romhM=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -1498,8 +1502,9 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -1625,8 +1630,8 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ=
@@ -1700,8 +1705,6 @@ github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2el
github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU=
github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE=
github.com/xanzy/go-gitlab v0.91.1 h1:gnV57IPGYywWer32oXKBcdmc8dVxeKl3AauV8Bu17rw=
github.com/xanzy/go-gitlab v0.91.1/go.mod h1:5ryv+MnpZStBH8I/77HuQBsMbBGANtVpLWC15qOjWAw=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
@@ -1747,8 +1750,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
@@ -1796,7 +1799,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -1815,8 +1817,9 @@ golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1961,8 +1964,9 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2134,8 +2138,9 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -2149,8 +2154,9 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2169,8 +2175,9 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2260,8 +2267,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2574,8 +2581,9 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
@@ -2596,8 +2604,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/retry.v1 v1.0.3 h1:a9CArYczAVv6Qs6VGoLMio99GEs7kY9UzSF9+LD+iGs=
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
@@ -2705,8 +2711,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
sigs.k8s.io/controller-runtime v0.14.7 h1:Vrnm2vk9ZFlRkXATHz0W0wXcqNl7kPat8q2JyxVy0Q8=
sigs.k8s.io/controller-runtime v0.14.7/go.mod h1:ErTs3SJCOujNUnTz4AS+uh8hp6DHMo1gj6fFndJT1X8=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=

@@ -1,6 +1,7 @@
package main

import (
    "bytes"
    "fmt"
    "log"
    "os"

@@ -64,6 +65,11 @@ func updateMkDocsNav(parent string, child string, subchild string, files []strin
    if err != nil {
        return err
    }

    // The marshaller drops custom tags, so re-add this one. Turns out this is much less invasive than trying to handle
    // it at the YAML parser level.
    newmkdocs = bytes.Replace(newmkdocs, []byte("site_url: READTHEDOCS_CANONICAL_URL"), []byte("site_url: !ENV READTHEDOCS_CANONICAL_URL"), 1)

    return os.WriteFile("mkdocs.yml", newmkdocs, 0644)
}

@@ -0,0 +1 @@
f43e1c3387de24547506ab05d24e5309c0ce0b228c23bd8aa64e9ec4b8206651 helm-v3.14.0-linux-amd64.tar.gz
@@ -0,0 +1 @@
b29e61674731b15f6ad3d1a3118a99d3cc2ab25a911aad1b8ac8c72d5a9d2952 helm-v3.14.0-linux-arm64.tar.gz
@@ -0,0 +1 @@
f1f9d3561724863edd4c06d89acb2e2fd8ae0f1b72058ceb891fa1c346ce5dbc helm-v3.14.0-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
82298ef39936f1bef848959a29f77bff92d1309d8646657e3a7733702e81288c helm-v3.14.0-linux-s390x.tar.gz
@@ -0,0 +1 @@
75496ea824f92305ff7d28af37f4af57536bf5138399c824dff997b9d239dd42 helm-v3.14.1-linux-amd64.tar.gz
@@ -0,0 +1 @@
f865b8ad4228fd0990bbc5b50615eb6cb9eb31c9a9ca7238401ed897bbbe9033 helm-v3.14.1-linux-arm64.tar.gz
@@ -0,0 +1 @@
4d853ab8fe3462287c7272fbadd5f73531ecdd6fa0db37d31630e41ae1ae21de helm-v3.14.1-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
19bf07999c7244bfeb0fd27152919b9faa1148cf43910edbb98efa9150058a98 helm-v3.14.1-linux-s390x.tar.gz
@@ -0,0 +1 @@
0885a501d586c1e949e9b113bf3fb3290b0bbf74db9444a1d8c2723a143006a5 helm-v3.14.2-linux-amd64.tar.gz
@@ -0,0 +1 @@
c65d6a9557bb359abc2c0d26670de850b52327dc3976ad6f9e14c298ea3e1b61 helm-v3.14.2-linux-arm64.tar.gz
@@ -0,0 +1 @@
f3bc8582ff151e619cd285d9cdf9fef1c5733ee5522d8bed2ef680ef07f87223 helm-v3.14.2-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
7bda34aa26638e5116b31385f3b781172572175bf4c1ae00c87d8b154458ed94 helm-v3.14.2-linux-s390x.tar.gz
@@ -0,0 +1 @@
4d5d01a94c7d6b07e71690dc1988bf3229680284c87f4242d28c6f1cc99653be helm-v3.14.3-darwin-amd64.tar.gz
Some files were not shown because too many files have changed in this diff.