Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 09:38:49 +01:00

Compare commits (59 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 335875d13e |  |
|  | 4192e3f3ac |  |
|  | 3e5a878f6e |  |
|  | 47d586169f |  |
|  | f5d63a5c77 |  |
|  | ce04dc5c6f |  |
|  | cebb6538f7 |  |
|  | ab7e45da13 |  |
|  | a8ae929d55 |  |
|  | f3fdaa7eab |  |
|  | 0b4659c046 |  |
|  | 0fd6344537 |  |
|  | 0977f61554 |  |
|  | 3dd069b049 |  |
|  | 37da5e2ae5 |  |
|  | 12886657ac |  |
|  | fcf5d8c238 |  |
|  | 1ee3c80bc8 |  |
|  | a79fcad0e9 |  |
|  | 67e57a47a2 |  |
|  | d99ee9d28b |  |
|  | 28a9225e7b |  |
|  | f5d6b2972b |  |
|  | 06e2e0da9a |  |
|  | a79e0eaca4 |  |
|  | 65461a1b61 |  |
|  | 2268f08819 |  |
|  | a1a5c58a7d |  |
|  | 9c379af169 |  |
|  | 4e01115a48 |  |
|  | eddf0a5f30 |  |
|  | 2175939ed6 |  |
|  | 1c959b7b0d |  |
|  | dfba4098d1 |  |
|  | 1835210574 |  |
|  | b78befe441 |  |
|  | e932556758 |  |
|  | 28c29380d4 |  |
|  | 18f82913b6 |  |
|  | d9ece9295e |  |
|  | 0c62f6d6b9 |  |
|  | 14b8762684 |  |
|  | c6469aef7e |  |
|  | a848105a6d |  |
|  | b657e97448 |  |
|  | 00fae11d99 |  |
|  | c3d125f616 |  |
|  | 1041086231 |  |
|  | da10276c8d |  |
|  | 4d0f940e04 |  |
|  | 8b865d7e30 |  |
|  | a6d8c924ee |  |
|  | 3c5878ecf4 |  |
|  | 12f2252700 |  |
|  | 6f6a9a940b |  |
|  | 3ca67858f0 |  |
|  | 3f18c21c07 |  |
|  | 3ebcca66f3 |  |
|  | d9196060c2 |  |
.github/workflows/image-reuse.yaml (4 changes, vendored)

@@ -74,9 +74,7 @@ jobs:
          go-version: ${{ inputs.go-version }}

      - name: Install cosign
        uses: sigstore/cosign-installer@1fc5bd396d372bee37d608f955b336615edf79c8 # v3.2.0
        with:
          cosign-release: 'v2.2.1'
        uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0

      - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
      - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
.github/workflows/image.yaml (2 changes, vendored)

@@ -86,7 +86,7 @@ jobs:
      packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
    if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.10.0
    with:
      image: ghcr.io/argoproj/argo-cd/argocd
      digest: ${{ needs.build-and-publish.outputs.image-digest }}
.github/workflows/release.yaml (14 changes, vendored)

@@ -38,7 +38,7 @@ jobs:
      packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
    if: github.repository == 'argoproj/argo-cd'
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.10.0
    with:
      image: quay.io/argoproj/argocd
      digest: ${{ needs.argocd-image.outputs.image-digest }}

@@ -87,6 +87,14 @@ jobs:
          echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV
          echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
        with:
          large-packages: false
          docker-images: false
          swap-storage: false
          tool-cache: false

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
        id: run-goreleaser

@@ -120,7 +128,7 @@ jobs:
      contents: write # Needed for release uploads
    if: github.repository == 'argoproj/argo-cd'
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
    with:
      base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
      provenance-name: "argocd-cli.intoto.jsonl"

@@ -204,7 +212,7 @@ jobs:
      contents: write # Needed for release uploads
    if: github.repository == 'argoproj/argo-cd'
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
    with:
      base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
      provenance-name: "argocd-sbom.intoto.jsonl"
Makefile (2 changes)

@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=

ARGOCD_E2E_TEST_TIMEOUT?=60m
ARGOCD_E2E_TEST_TIMEOUT?=90m

ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
Procfile (2 changes)

@@ -1,4 +1,4 @@
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
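The only functional change in the Procfile is the `HOSTNAME=testappcontroller-1` prefix on the controller process, which gives the locally run application controller a StatefulSet-style hostname whose numeric suffix the sharding code can use as its shard number. A minimal, hypothetical sketch of that hostname-to-shard inference (the real logic lives in `controller/sharding`; names here are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// inferShard derives a shard number from the pod-style hostname's ordinal
// suffix, e.g. "testappcontroller-1" -> shard 1. This is only a sketch of the
// idea; it is not the Argo CD implementation.
func inferShard() (int, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return -1, err
	}
	parts := strings.Split(hostname, "-")
	shard, err := strconv.Atoi(parts[len(parts)-1])
	if err != nil {
		return -1, fmt.Errorf("hostname %q has no numeric ordinal suffix: %w", hostname, err)
	}
	return shard, nil
}

func main() {
	shard, err := inferShard()
	if err != nil {
		fmt.Println("running unsharded:", err)
		return
	}
	fmt.Println("processing clusters from shard", shard)
}
```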
USERS.md (1 change)

@@ -40,6 +40,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Bulder Bank](https://bulderbank.no)
1. [CAM](https://cam-inc.co.jp)
1. [Camptocamp](https://camptocamp.com)
1. [Candis](https://www.candis.io)
1. [Capital One](https://www.capitalone.com)
@@ -7405,6 +7405,7 @@
"properties": {
  "elements": {
    "type": "array",
    "title": "+kubebuilder:validation:Optional",
    "items": {
      "$ref": "#/definitions/v1JSON"
    }
@@ -24,15 +24,12 @@ import (
	cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
	appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
	"github.com/argoproj/argo-cd/v2/util/cli"
	"github.com/argoproj/argo-cd/v2/util/db"
	"github.com/argoproj/argo-cd/v2/util/env"
	"github.com/argoproj/argo-cd/v2/util/errors"
	kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
	"github.com/argoproj/argo-cd/v2/util/settings"
	"github.com/argoproj/argo-cd/v2/util/tls"
	"github.com/argoproj/argo-cd/v2/util/trace"
	kubeerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (

@@ -50,6 +47,7 @@ func NewCommand() *cobra.Command {
	clientConfig clientcmd.ClientConfig
	appResyncPeriod int64
	appHardResyncPeriod int64
	appResyncJitter int64
	repoErrorGracePeriod int64
	repoServerAddress string
	repoServerTimeoutSeconds int

@@ -146,7 +144,8 @@ func NewCommand() *cobra.Command {
	appController.InvalidateProjectsCache()
}))
kubectl := kubeutil.NewKubectl()
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
appController, err = controller.NewApplicationController(
	namespace,
	settingsMgr,

@@ -157,6 +156,7 @@ func NewCommand() *cobra.Command {
	kubectl,
	resyncDuration,
	hardResyncDuration,
	time.Duration(appResyncJitter)*time.Second,
	time.Duration(selfHealTimeoutSeconds)*time.Second,
	time.Duration(repoErrorGracePeriod)*time.Second,
	metricsPort,

@@ -164,10 +164,11 @@ func NewCommand() *cobra.Command {
	metricsAplicationLabels,
	kubectlParallelismLimit,
	persistResourceHealth,
	clusterFilter,
	clusterSharding,
	applicationNamespaces,
	&workqueueRateLimit,
	serverSideDiff,
	enableDynamicClusterDistribution,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())

@@ -194,6 +195,7 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
command.Flags().Int64Var(&appHardResyncPeriod, "app-hard-resync", int64(env.ParseDurationFromEnv("ARGOCD_HARD_RECONCILIATION_TIMEOUT", defaultAppHardResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application hard resync.")
command.Flags().Int64Var(&appResyncJitter, "app-resync-jitter", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_JITTER", 0*time.Second, 0, math.MaxInt64).Seconds()), "Maximum time period in seconds to add as a delay jitter for application resync.")
command.Flags().Int64Var(&repoErrorGracePeriod, "repo-error-grace-period-seconds", int64(env.ParseDurationFromEnv("ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Grace period in seconds for ignoring consecutive errors while communicating with repo server.")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")

@@ -218,7 +220,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
// global queue rate limit config
command.Flags().Int64Var(&workqueueRateLimit.BucketSize, "wq-bucket-size", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_SIZE", 500, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket Size, default 500")
command.Flags().Int64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_QPS", 50, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket QPS, default 50")
command.Flags().Float64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseFloat64FromEnv("WORKQUEUE_BUCKET_QPS", math.MaxFloat64, 1, math.MaxFloat64), "Set Workqueue Rate Limiter Bucket QPS, default set to MaxFloat64 which disables the bucket limiter")
// individual item rate limit config
// when WORKQUEUE_FAILURE_COOLDOWN is 0 per item rate limiting is disabled(default)
command.Flags().DurationVar(&workqueueRateLimit.FailureCoolDown, "wq-cooldown-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_FAILURE_COOLDOWN_NS", 0, 0, (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)")
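The last hunk above turns `--wq-bucket-qps` into a float64 whose default, `math.MaxFloat64`, effectively switches the global token-bucket limiter off. A small, hypothetical sketch of what such a bucket limiter looks like, assuming `golang.org/x/time/rate` (this is not the Argo CD rate limiter itself):

```go
package main

import (
	"fmt"
	"math"

	"golang.org/x/time/rate"
)

// newBucketLimiter builds a token bucket with the given refill rate (QPS) and
// capacity (bucket size), the two quantities the wq-bucket-* flags describe.
func newBucketLimiter(qps float64, bucketSize int) *rate.Limiter {
	return rate.NewLimiter(rate.Limit(qps), bucketSize)
}

func main() {
	// math.MaxFloat64 is rate.Inf, so reservations never wait: the limiter is
	// effectively disabled, matching the new flag default above.
	disabled := newBucketLimiter(math.MaxFloat64, 500)
	throttled := newBucketLimiter(50, 500)

	fmt.Println("delay with limiter disabled:", disabled.Reserve().Delay())
	fmt.Println("throttled limiter qps/burst:", throttled.Limit(), throttled.Burst())
}
```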
@@ -232,58 +234,3 @@ func NewCommand() *cobra.Command {
	})
	return &command
}

func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {

	var replicas int
	shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)

	applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
	appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})

	// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
	if err != nil && kubeerrors.IsNotFound(err) {
		appControllerDeployment = nil
	}

	if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
		replicas = int(*appControllerDeployment.Spec.Replicas)
	} else {
		replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
	}

	var clusterFilter func(cluster *v1alpha1.Cluster) bool
	if replicas > 1 {
		// check for shard mapping using configmap if application-controller is a deployment
		// else use existing logic to infer shard from pod name if application-controller is a statefulset
		if enableDynamicClusterDistribution && appControllerDeployment != nil {

			var err error
			// retry 3 times if we find a conflict while updating shard mapping configMap.
			// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
			for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
				shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
				if !kubeerrors.IsConflict(err) {
					err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
					break
				}
				log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
			}
			errors.CheckError(err)
		} else {
			if shard < 0 {
				var err error
				shard, err = sharding.InferShard()
				errors.CheckError(err)
			}
		}
		log.Infof("Processing clusters from shard %d", shard)
		db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
		log.Infof("Using filter function: %s", shardingAlgorithm)
		distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
		clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
	} else {
		log.Info("Processing all cluster shards")
	}
	return clusterFilter
}
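The hunk above deletes the command-local getClusterFilter helper; shard assignment now comes from sharding.GetClusterSharding. To make the distribution/filter split it relied on concrete, here is a toy sketch of a round-robin distribution function and the filter built on top of it (illustrative names only, not the controller/sharding implementation; the legacy algorithm hashes cluster IDs instead of using list position):

```go
package main

import (
	"fmt"
	"sort"
)

type cluster struct{ Server string }

// roundRobinDistribution maps each cluster to a shard by its position in the
// sorted server list, modulo the number of controller replicas.
func roundRobinDistribution(clusters []cluster, replicas int) func(c cluster) int {
	servers := make([]string, len(clusters))
	for i, c := range clusters {
		servers[i] = c.Server
	}
	sort.Strings(servers)
	index := make(map[string]int, len(servers))
	for i, s := range servers {
		index[s] = i
	}
	return func(c cluster) int {
		if replicas == 0 {
			return -1 // no replicas: nothing is assigned
		}
		return index[c.Server] % replicas
	}
}

// clusterFilter keeps only the clusters whose assigned shard matches ours.
func clusterFilter(distribute func(cluster) int, shard int) func(cluster) bool {
	return func(c cluster) bool { return distribute(c) == shard }
}

func main() {
	clusters := []cluster{{"https://a"}, {"https://b"}, {"https://c"}}
	distribute := roundRobinDistribution(clusters, 2)
	filter := clusterFilter(distribute, 0)
	for _, c := range clusters {
		fmt.Printf("%s -> shard %d, owned by shard 0: %v\n", c.Server, distribute(c), filter(c))
	}
}
```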
@@ -68,6 +68,7 @@ func NewCommand() *cobra.Command {
	streamedManifestMaxTarSize string
	streamedManifestMaxExtractedSize string
	helmManifestMaxExtractedSize string
	helmRegistryMaxIndexSize string
	disableManifestMaxExtractedSize bool
)
var command = cobra.Command{

@@ -110,6 +111,9 @@ func NewCommand() *cobra.Command {
helmManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(helmManifestMaxExtractedSize)
errors.CheckError(err)

helmRegistryMaxIndexSizeQuantity, err := resource.ParseQuantity(helmRegistryMaxIndexSize)
errors.CheckError(err)

askPassServer := askpass.NewServer()
metricsServer := metrics.NewMetricsServer()
cacheutil.CollectMetrics(redisClient, metricsServer)

@@ -125,6 +129,7 @@ func NewCommand() *cobra.Command {
StreamedManifestMaxExtractedSize: streamedManifestMaxExtractedSizeQuantity.ToDec().Value(),
StreamedManifestMaxTarSize: streamedManifestMaxTarSizeQuantity.ToDec().Value(),
HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(),
HelmRegistryMaxIndexSize: helmRegistryMaxIndexSizeQuantity.ToDec().Value(),
}, askPassServer)
errors.CheckError(err)

@@ -208,6 +213,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&streamedManifestMaxTarSize, "streamed-manifest-max-tar-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE", "100M"), "Maximum size of streamed manifest archives")
command.Flags().StringVar(&streamedManifestMaxExtractedSize, "streamed-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of streamed manifest archives when extracted")
command.Flags().StringVar(&helmManifestMaxExtractedSize, "helm-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of helm manifest archives when extracted")
command.Flags().StringVar(&helmRegistryMaxIndexSize, "helm-registry-max-index-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_INDEX_SIZE", "1G"), "Maximum size of registry index file")
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
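The repo-server flags above accept human-readable sizes ("100M", "1G") and convert them to byte counts with resource.ParseQuantity, as the new --helm-registry-max-index-size flag does. A minimal standalone sketch of that parsing, assuming k8s.io/apimachinery (the helper name is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// parseSizeLimit converts a Kubernetes-style quantity string into bytes.
func parseSizeLimit(s string) (int64, error) {
	q, err := resource.ParseQuantity(s)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %w", s, err)
	}
	// ToDec().Value() yields the value as a plain byte count.
	return q.ToDec().Value(), nil
}

func main() {
	for _, s := range []string{"100M", "1G"} {
		n, err := parseSizeLimit(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s = %d bytes\n", s, n)
	}
}
```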
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"math"
	"strings"
	"time"

	"github.com/argoproj/pkg/stats"

@@ -61,6 +62,7 @@ func NewCommand() *cobra.Command {
	repoServerAddress string
	dexServerAddress string
	disableAuth bool
	contentTypes string
	enableGZip bool
	tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
	cacheSrc func() (*servercache.Cache, error)

@@ -165,6 +167,11 @@ func NewCommand() *cobra.Command {
	baseHRef = rootPath
}

var contentTypesList []string
if contentTypes != "" {
	contentTypesList = strings.Split(contentTypes, ";")
}

argoCDOpts := server.ArgoCDServerOpts{
	Insecure: insecure,
	ListenPort: listenPort,

@@ -180,6 +187,7 @@ func NewCommand() *cobra.Command {
	DexServerAddr: dexServerAddress,
	DexTLSConfig: dexTlsConfig,
	DisableAuth: disableAuth,
	ContentTypes: contentTypesList,
	EnableGZip: enableGZip,
	TLSConfigCustomizer: tlsConfigCustomizer,
	Cache: cache,

@@ -234,6 +242,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", env.StringFromEnvOpts{AllowEmpty: true}), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
@@ -25,6 +25,7 @@ import (
	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
	"github.com/argoproj/argo-cd/v2/util/argo"

@@ -78,7 +79,7 @@ type ClusterWithInfo struct {
	Namespaces []string
}

func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, shardingAlgorithm string, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
	settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)

	argoDB := db.NewDB(namespace, settingsMgr, kubeClient)

@@ -86,6 +87,10 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
	if err != nil {
		return nil, err
	}
	clusterShardingCache := sharding.NewClusterSharding(argoDB, shard, replicas, shardingAlgorithm)
	clusterShardingCache.Init(clustersList)
	clusterShards := clusterShardingCache.GetDistribution()

	var cache *appstatecache.Cache
	if portForwardRedis {
		overrides := clientcmd.ConfigOverrides{}

@@ -122,8 +127,15 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
		apps[i] = app
	}
	clusters := make([]ClusterWithInfo, len(clustersList.Items))

	batchSize := 10
	batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
	clusterSharding := &sharding.ClusterSharding{
		Shard: shard,
		Replicas: replicas,
		Shards: make(map[string]int),
		Clusters: make(map[string]*v1alpha1.Cluster),
	}
	for batchNum := 0; batchNum < batchesCount; batchNum++ {
		batchStart := batchSize * batchNum
		batchEnd := batchSize * (batchNum + 1)

@@ -135,12 +147,12 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
		clusterShard := 0
		cluster := batch[i]
		if replicas > 0 {
			distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
			distributionFunction := sharding.GetDistributionFunction(clusterSharding.GetClusterAccessor(), common.DefaultShardingAlgorithm, replicas)
			distributionFunction(&cluster)
			clusterShard := clusterShards[cluster.Server]
			cluster.Shard = pointer.Int64(int64(clusterShard))
			log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
		}

		if shard != -1 && clusterShard != shard {
			return nil
		}
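The loadClusters hunk above walks the cluster list in fixed windows of ten. A standalone sketch of that batch-window arithmetic; the clamp on the final window is implied by the slice bounds in the real code and is made explicit here:

```go
package main

import (
	"fmt"
	"math"
)

// processInBatches splits items into windows of batchSize, mirroring the
// batchesCount / batchStart / batchEnd arithmetic in the hunk above.
func processInBatches(items []string, batchSize int, process func(batch []string)) {
	batchesCount := int(math.Ceil(float64(len(items)) / float64(batchSize)))
	for batchNum := 0; batchNum < batchesCount; batchNum++ {
		batchStart := batchSize * batchNum
		batchEnd := batchSize * (batchNum + 1)
		if batchEnd > len(items) {
			batchEnd = len(items) // clamp the last, possibly shorter, window
		}
		process(items[batchStart:batchEnd])
	}
}

func main() {
	clusters := []string{"a", "b", "c", "d", "e", "f", "g"}
	processInBatches(clusters, 3, func(batch []string) {
		fmt.Println("processing batch:", batch)
	})
}
```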
@@ -176,6 +188,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
	var (
		shard int
		replicas int
		shardingAlgorithm string
		clientConfig clientcmd.ClientConfig
		cacheSrc func() (*appstatecache.Cache, error)
		portForwardRedis bool

@@ -183,7 +196,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
	)
	var command = cobra.Command{
		Use: "shards",
		Short: "Print information about each controller shard and portion of Kubernetes resources it is responsible for.",
		Short: "Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.",
		Run: func(cmd *cobra.Command, args []string) {
			ctx := cmd.Context()

@@ -203,8 +216,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
			if replicas == 0 {
				return
			}

			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
			errors.CheckError(err)
			if len(clusters) == 0 {
				return

@@ -216,7 +228,9 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
	clientConfig = cli.AddKubectlFlagsToCmd(&command)
	command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
	command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
	command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] ")
	command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")

	cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)

	// parse all added flags so far to get the redis-compression flag that was added by AddCacheFlagsToCmd() above

@@ -461,6 +475,7 @@ func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
	var (
		shard int
		replicas int
		shardingAlgorithm string
		clientConfig clientcmd.ClientConfig
		cacheSrc func() (*appstatecache.Cache, error)
		portForwardRedis bool

@@ -494,7 +509,7 @@ argocd admin cluster stats target-cluster`,
			replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
			errors.CheckError(err)
			}
			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
			errors.CheckError(err)

			w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)

@@ -508,6 +523,7 @@ argocd admin cluster stats target-cluster`,
	clientConfig = cli.AddKubectlFlagsToCmd(&command)
	command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
	command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
	command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] ")
	command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
	cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
@@ -1116,6 +1116,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
			defer argoio.Close(conn)
			cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
			errors.CheckError(err)

			diffOption.local = local
			diffOption.localRepoRoot = localRepoRoot
			diffOption.cluster = cluster
@@ -115,9 +115,9 @@ const (
	LegacyShardingAlgorithm = "legacy"
	// RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution accross all shards
	RoundRobinShardingAlgorithm = "round-robin"
	DefaultShardingAlgorithm = LegacyShardingAlgorithm
	// AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
	AppControllerHeartbeatUpdateRetryCount = 3
	DefaultShardingAlgorithm = LegacyShardingAlgorithm
)

// Dex related constants
@@ -6,6 +6,7 @@ import (
	goerrors "errors"
	"fmt"
	"math"
	"math/rand"
	"net/http"
	"reflect"
	"runtime/debug"

@@ -113,11 +114,11 @@ type ApplicationController struct {
	appInformer cache.SharedIndexInformer
	appLister applisters.ApplicationLister
	projInformer cache.SharedIndexInformer
	deploymentInformer informerv1.DeploymentInformer
	appStateManager AppStateManager
	stateCache statecache.LiveStateCache
	statusRefreshTimeout time.Duration
	statusHardRefreshTimeout time.Duration
	statusRefreshJitter time.Duration
	selfHealTimeout time.Duration
	repoClientset apiclient.Clientset
	db db.ArgoDB

@@ -126,9 +127,13 @@ type ApplicationController struct {
	refreshRequestedAppsMutex *sync.Mutex
	metricsServer *metrics.MetricsServer
	kubectlSemaphore *semaphore.Weighted
	clusterFilter func(cluster *appv1.Cluster) bool
	clusterSharding sharding.ClusterShardingCache
	projByNameCache sync.Map
	applicationNamespaces []string

	// dynamicClusterDistributionEnabled if disabled deploymentInformer is never initialized
	dynamicClusterDistributionEnabled bool
	deploymentInformer informerv1.DeploymentInformer
}

// NewApplicationController creates new instance of ApplicationController.

@@ -142,6 +147,7 @@ func NewApplicationController(
	kubectl kube.Kubectl,
	appResyncPeriod time.Duration,
	appHardResyncPeriod time.Duration,
	appResyncJitter time.Duration,
	selfHealTimeout time.Duration,
	repoErrorGracePeriod time.Duration,
	metricsPort int,
@@ -149,39 +155,42 @@ func NewApplicationController(
	metricsApplicationLabels []string,
	kubectlParallelismLimit int64,
	persistResourceHealth bool,
	clusterFilter func(cluster *appv1.Cluster) bool,
	clusterSharding sharding.ClusterShardingCache,
	applicationNamespaces []string,
	rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
	serverSideDiff bool,
	dynamicClusterDistributionEnabled bool,
) (*ApplicationController, error) {
	log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod)
	log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
	db := db.NewDB(namespace, settingsMgr, kubeClientset)
	if rateLimiterConfig == nil {
		rateLimiterConfig = ratelimiter.GetDefaultAppRateLimiterConfig()
		log.Info("Using default workqueue rate limiter config")
	}
	ctrl := ApplicationController{
		cache: argoCache,
		namespace: namespace,
		kubeClientset: kubeClientset,
		kubectl: kubectl,
		applicationClientset: applicationClientset,
		repoClientset: repoClientset,
		appRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
		appOperationQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
		projectRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
		appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
		db: db,
		statusRefreshTimeout: appResyncPeriod,
		statusHardRefreshTimeout: appHardResyncPeriod,
		refreshRequestedApps: make(map[string]CompareWith),
		refreshRequestedAppsMutex: &sync.Mutex{},
		auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
		settingsMgr: settingsMgr,
		selfHealTimeout: selfHealTimeout,
		clusterFilter: clusterFilter,
		projByNameCache: sync.Map{},
		applicationNamespaces: applicationNamespaces,

		cache: argoCache,
		namespace: namespace,
		kubeClientset: kubeClientset,
		kubectl: kubectl,
		applicationClientset: applicationClientset,
		repoClientset: repoClientset,
		appRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
		appOperationQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
		projectRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
		appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
		db: db,
		statusRefreshTimeout: appResyncPeriod,
		statusHardRefreshTimeout: appHardResyncPeriod,
		statusRefreshJitter: appResyncJitter,
		refreshRequestedApps: make(map[string]CompareWith),
		refreshRequestedAppsMutex: &sync.Mutex{},
		auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
		settingsMgr: settingsMgr,
		selfHealTimeout: selfHealTimeout,
		clusterSharding: clusterSharding,
		projByNameCache: sync.Map{},
		applicationNamespaces: applicationNamespaces,
		dynamicClusterDistributionEnabled: dynamicClusterDistributionEnabled,
	}
	if kubectlParallelismLimit > 0 {
		ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
@@ -224,25 +233,33 @@ func NewApplicationController(
	}

	factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
	deploymentInformer := factory.Apps().V1().Deployments()

	var deploymentInformer informerv1.DeploymentInformer

	// only initialize deployment informer if dynamic distribution is enabled
	if dynamicClusterDistributionEnabled {
		deploymentInformer = factory.Apps().V1().Deployments()
	}

	readinessHealthCheck := func(r *http.Request) error {
		applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
		appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
		if err != nil {
			if kubeerrors.IsNotFound(err) {
				appControllerDeployment = nil
			} else {
				return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
		if dynamicClusterDistributionEnabled {
			applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
			appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
			if err != nil {
				if kubeerrors.IsNotFound(err) {
					appControllerDeployment = nil
				} else {
					return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
				}
			}
		}
		if appControllerDeployment != nil {
			if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
				return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
			}
			shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
			if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
				return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
			if appControllerDeployment != nil {
				if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
					return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
				}
				shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
				if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
					return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
				}
			}
		}
		return nil

@@ -260,7 +277,7 @@ func NewApplicationController(
			return nil, err
		}
	}
	stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking())
	stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
	appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff)
	ctrl.appInformer = appInformer
	ctrl.appLister = appLister
@@ -770,7 +787,18 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int

	go ctrl.appInformer.Run(ctx.Done())
	go ctrl.projInformer.Run(ctx.Done())
	go ctrl.deploymentInformer.Informer().Run(ctx.Done())

	if ctrl.dynamicClusterDistributionEnabled {
		// only start deployment informer if dynamic distribution is enabled
		go ctrl.deploymentInformer.Informer().Run(ctx.Done())
	}

	clusters, err := ctrl.db.ListClusters(ctx)
	if err != nil {
		log.Warnf("Cannot init sharding. Error while querying clusters list from database: %v", err)
	} else {
		ctrl.clusterSharding.Init(clusters)
	}

	errors.CheckError(ctrl.stateCache.Init())

@@ -1636,6 +1664,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
	var reason string
	compareWith := CompareWithLatest
	refreshType := appv1.RefreshTypeNormal

	softExpired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
	hardExpired := (app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusHardRefreshTimeout).Before(time.Now().UTC())) && statusHardRefreshTimeout.Seconds() != 0
@@ -1976,15 +2005,11 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
		}
	}

	if ctrl.clusterFilter != nil {
		cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
		if err != nil {
			return ctrl.clusterFilter(nil)
		}
		return ctrl.clusterFilter(cluster)
	cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
	if err != nil {
		return ctrl.clusterSharding.IsManagedCluster(nil)
	}

	return true
	return ctrl.clusterSharding.IsManagedCluster(cluster)
}

func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {
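The canProcessApp hunk above replaces the optional clusterFilter callback with an unconditional ClusterShardingCache lookup. A simplified picture of what an IsManagedCluster-style check does: find the shard assigned to the app's destination cluster and compare it with the shard this replica owns. The real cache in controller/sharding also adds locking and nil-cluster handling; the names below are illustrative only:

```go
package main

import "fmt"

type clusterSharding struct {
	shard  int
	shards map[string]int // cluster server URL -> assigned shard
}

// isManagedCluster reports whether this replica's shard owns the given cluster.
func (s *clusterSharding) isManagedCluster(server string) bool {
	if server == "" {
		return true // unknown destinations are not filtered out in this sketch
	}
	assigned, ok := s.shards[server]
	if !ok {
		return false
	}
	return assigned == s.shard
}

func main() {
	s := &clusterSharding{shard: 0, shards: map[string]int{
		"https://kubernetes.default.svc": 0,
		"https://prod.example.com":       1,
	}}
	fmt.Println(s.isManagedCluster("https://kubernetes.default.svc")) // true
	fmt.Println(s.isManagedCluster("https://prod.example.com"))       // false
}
```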
@@ -2092,14 +2117,25 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
	if err != nil {
		return
	}

	var compareWith *CompareWith
	var delay *time.Duration

	oldApp, oldOK := old.(*appv1.Application)
	newApp, newOK := new.(*appv1.Application)
	if oldOK && newOK && automatedSyncEnabled(oldApp, newApp) {
		log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync")
		compareWith = CompareWithLatest.Pointer()
	if oldOK && newOK {
		if automatedSyncEnabled(oldApp, newApp) {
			log.WithField("application", newApp.QualifiedName()).Info("Enabled automated sync")
			compareWith = CompareWithLatest.Pointer()
		}
		if ctrl.statusRefreshJitter != 0 && oldApp.ResourceVersion == newApp.ResourceVersion {
			// Handler is refreshing the apps, add a random jitter to spread the load and avoid spikes
			jitter := time.Duration(float64(ctrl.statusRefreshJitter) * rand.Float64())
			delay = &jitter
		}
	}
	ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, nil)

	ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, delay)
	ctrl.appOperationQueue.AddRateLimited(key)
},
DeleteFunc: func(obj interface{}) {
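The UpdateFunc hunk above delays periodic refreshes by a random fraction of the configured jitter so that apps whose resync timers expire together do not all hit the controller at once. A standalone sketch of that calculation (the 60-second jitter below is an arbitrary example value):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// refreshDelay returns a random delay in [0, statusRefreshJitter), or zero
// when jitter is disabled, mirroring the jitter expression in the hunk above.
func refreshDelay(statusRefreshJitter time.Duration) time.Duration {
	if statusRefreshJitter == 0 {
		return 0
	}
	return time.Duration(float64(statusRefreshJitter) * rand.Float64())
}

func main() {
	jitter := 60 * time.Second
	for i := 0; i < 3; i++ {
		fmt.Println("refresh delayed by", refreshDelay(jitter).Round(time.Millisecond))
	}
}
```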
@@ -2136,7 +2172,7 @@ func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1
}

func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
	updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace)
	updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterSharding.IsManagedCluster, ctrl.getAppProj, ctrl.namespace)
	go updater.Run(ctx)
}
@@ -17,7 +17,9 @@ import (
	"github.com/argoproj/argo-cd/v2/common"
	statecache "github.com/argoproj/argo-cd/v2/controller/cache"
	"github.com/argoproj/argo-cd/v2/controller/sharding"

	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
	"github.com/argoproj/gitops-engine/pkg/cache/mocks"
	synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
	"github.com/argoproj/gitops-engine/pkg/utils/kube"

@@ -142,6 +144,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
	kubectl,
	time.Minute,
	time.Hour,
	time.Second,
	time.Minute,
	time.Second*10,
	common.DefaultPortArgoCDMetrics,

@@ -152,8 +155,14 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
	nil,
	data.applicationNamespaces,
	nil,

	false,
	false,
)
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(1)
// Setting a default sharding algorithm for the tests where we cannot set it.
ctrl.clusterSharding = sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm)
if err != nil {
	panic(err)
}

@@ -686,7 +695,6 @@ func TestFinalizeAppDeletion(t *testing.T) {
	ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
		kube.GetResourceKey(appObj): appObj,
	}}, nil)

	patched := false
	fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
	defaultReactor := fakeAppCs.ReactionChain[0]

@@ -1809,13 +1817,11 @@ func Test_canProcessApp(t *testing.T) {
	})
	t.Run("with cluster filter, good namespace", func(t *testing.T) {
		app.Namespace = "good"
		ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
		canProcess := ctrl.canProcessApp(app)
		assert.True(t, canProcess)
	})
	t.Run("with cluster filter, bad namespace", func(t *testing.T) {
		app.Namespace = "bad"
		ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
		canProcess := ctrl.canProcessApp(app)
		assert.False(t, canProcess)
	})
controller/cache/cache.go (21 changes, vendored)

@@ -29,6 +29,7 @@ import (
	"k8s.io/client-go/tools/cache"

	"github.com/argoproj/argo-cd/v2/controller/metrics"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application"
	appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/argo"

@@ -168,7 +169,7 @@ func NewLiveStateCache(
	kubectl kube.Kubectl,
	metricsServer *metrics.MetricsServer,
	onObjectUpdated ObjectUpdatedHandler,
	clusterFilter func(cluster *appv1.Cluster) bool,
	clusterSharding sharding.ClusterShardingCache,
	resourceTracking argo.ResourceTracking) LiveStateCache {

	return &liveStateCache{

@@ -179,7 +180,7 @@ func NewLiveStateCache(
	kubectl: kubectl,
	settingsMgr: settingsMgr,
	metricsServer: metricsServer,
	clusterFilter: clusterFilter,
	clusterSharding: clusterSharding,
	resourceTracking: resourceTracking,
	}
}

@@ -202,7 +203,7 @@ type liveStateCache struct {
	kubectl kube.Kubectl
	settingsMgr *settings.SettingsManager
	metricsServer *metrics.MetricsServer
	clusterFilter func(cluster *appv1.Cluster) bool
	clusterSharding sharding.ClusterShardingCache
	resourceTracking argo.ResourceTracking

	clusters map[string]clustercache.ClusterCache

@@ -722,22 +723,24 @@ func (c *liveStateCache) Run(ctx context.Context) error {
}

func (c *liveStateCache) canHandleCluster(cluster *appv1.Cluster) bool {
	if c.clusterFilter == nil {
		return true
	}
	return c.clusterFilter(cluster)
	return c.clusterSharding.IsManagedCluster(cluster)
}

func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
	c.clusterSharding.Add(cluster)
	if !c.canHandleCluster(cluster) {
		log.Infof("Ignoring cluster %s", cluster.Server)
		return
	}

	c.lock.Lock()
	_, ok := c.clusters[cluster.Server]
	c.lock.Unlock()
	if !ok {
		log.Debugf("Checking if cache %v / cluster %v has appInformer %v", c, cluster, c.appInformer)
		if c.appInformer == nil {
			log.Warn("Cannot get a cluster appInformer. Cache may not be started this time")
			return
		}
		if c.isClusterHasApps(c.appInformer.GetStore().List(), cluster) {
			go func() {
				// warm up cache for cluster with apps

@@ -748,6 +751,7 @@ func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
}

func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *appv1.Cluster) {
	c.clusterSharding.Update(oldCluster, newCluster)
	c.lock.Lock()
	cluster, ok := c.clusters[newCluster.Server]
	c.lock.Unlock()

@@ -790,6 +794,7 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a

func (c *liveStateCache) handleDeleteEvent(clusterServer string) {
	c.lock.RLock()
	c.clusterSharding.Delete(clusterServer)
	cluster, ok := c.clusters[clusterServer]
	c.lock.RUnlock()
	if ok {
controller/cache/cache_test.go (50 changes, vendored)

@@ -21,7 +21,11 @@ import (
	"github.com/stretchr/testify/mock"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/controller/metrics"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
	argosettings "github.com/argoproj/argo-cd/v2/util/settings"
)

@@ -35,11 +39,13 @@ func TestHandleModEvent_HasChanges(t *testing.T) {
	clusterCache := &mocks.ClusterCache{}
	clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
	clusterCache.On("EnsureSynced").Return(nil).Once()

	db := &dbmocks.ArgoDB{}
	db.On("GetApplicationControllerReplicas").Return(1)
	clustersCache := liveStateCache{
		clusters: map[string]cache.ClusterCache{
			"https://mycluster": clusterCache,
		},
		clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
	}

	clustersCache.handleModEvent(&appv1.Cluster{

@@ -56,14 +62,22 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
	clusterCache := &mocks.ClusterCache{}
	clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
	clusterCache.On("EnsureSynced").Return(nil).Once()

	db := &dbmocks.ArgoDB{}
	db.On("GetApplicationControllerReplicas").Return(1)
	clustersCache := liveStateCache{
		clusters: map[string]cache.ClusterCache{
			"https://mycluster": clusterCache,
		},
		clusterFilter: func(cluster *appv1.Cluster) bool {
			return false
		db: nil,
		appInformer: nil,
		onObjectUpdated: func(managedByApp map[string]bool, ref v1.ObjectReference) {
		},
		kubectl: nil,
		settingsMgr: &argosettings.SettingsManager{},
		metricsServer: &metrics.MetricsServer{},
		// returns a shard that never process any cluster
		clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
		resourceTracking: nil,
		clusters: map[string]cache.ClusterCache{"https://mycluster": clusterCache},
		cacheSettings: cacheSettings{},
		lock: sync.RWMutex{},
	}

	clustersCache.handleModEvent(&appv1.Cluster{

@@ -75,18 +89,20 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
		Namespaces: []string{"default"},
	})

	assert.Len(t, clustersCache.clusters, 0)
	assert.Len(t, clustersCache.clusters, 1)
}

func TestHandleModEvent_NoChanges(t *testing.T) {
	clusterCache := &mocks.ClusterCache{}
	clusterCache.On("Invalidate", mock.Anything).Panic("should not invalidate")
	clusterCache.On("EnsureSynced").Return(nil).Panic("should not re-sync")

	db := &dbmocks.ArgoDB{}
	db.On("GetApplicationControllerReplicas").Return(1)
	clustersCache := liveStateCache{
		clusters: map[string]cache.ClusterCache{
			"https://mycluster": clusterCache,
		},
		clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
	}

	clustersCache.handleModEvent(&appv1.Cluster{
@@ -99,11 +115,11 @@ func TestHandleModEvent_NoChanges(t *testing.T) {
}

func TestHandleAddEvent_ClusterExcluded(t *testing.T) {
	db := &dbmocks.ArgoDB{}
	db.On("GetApplicationControllerReplicas").Return(1)
	clustersCache := liveStateCache{
		clusters: map[string]cache.ClusterCache{},
		clusterFilter: func(cluster *appv1.Cluster) bool {
			return false
		},
		clusters: map[string]cache.ClusterCache{},
		clusterSharding: sharding.NewClusterSharding(db, 0, 2, common.DefaultShardingAlgorithm),
	}
	clustersCache.handleAddEvent(&appv1.Cluster{
		Server: "https://mycluster",

@@ -118,6 +134,8 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
		Server: "https://mycluster",
		Config: appv1.ClusterConfig{Username: "bar"},
	}
	db := &dbmocks.ArgoDB{}
	db.On("GetApplicationControllerReplicas").Return(1)
	fakeClient := fake.NewSimpleClientset()
	settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
	liveStateCacheLock := sync.RWMutex{}

@@ -126,10 +144,8 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
	clusters: map[string]cache.ClusterCache{
		testCluster.Server: gitopsEngineClusterCache,
	},
	clusterFilter: func(cluster *appv1.Cluster) bool {
		return true
	},
	settingsMgr: settingsMgr,
	clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
	settingsMgr: settingsMgr,
	// Set the lock here so we can reference it later
	// nolint We need to overwrite here to have access to the lock
	lock: liveStateCacheLock,
@@ -23,6 +23,8 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/git"
|
||||
"github.com/argoproj/argo-cd/v2/util/healthz"
|
||||
"github.com/argoproj/argo-cd/v2/util/profile"
|
||||
|
||||
ctrl_metrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
)
|
||||
|
||||
type MetricsServer struct {
|
||||
@@ -160,12 +162,12 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
|
||||
|
||||
mux := http.NewServeMux()
|
||||
registry := NewAppRegistry(appLister, appFilter, appLabels)
|
||||
registry.MustRegister(depth, adds, latency, workDuration, unfinished, longestRunningProcessor, retries)
|
||||
|
||||
mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{
|
||||
// contains app controller specific metrics
|
||||
registry,
|
||||
// contains process, golang and controller workqueues metrics
|
||||
prometheus.DefaultGatherer,
|
||||
// contains workqueue metrics, process and golang metrics
|
||||
ctrl_metrics.Registry,
|
||||
}, promhttp.HandlerOpts{}))
|
||||
profile.RegisterProfiler(mux)
|
||||
healthz.ServeHealthCheck(mux, healthCheck)
|
||||
|
||||
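For context on the handler wiring above: prometheus.Gatherers lets a single /metrics endpoint merge the output of several registries. A standalone sketch of the same composition (port and registry contents are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	ctrl_metrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func main() {
	appRegistry := prometheus.NewRegistry() // would hold controller-specific metrics

	mux := http.NewServeMux()
	// One endpoint can gather from several registries at scrape time.
	mux.Handle("/metrics", promhttp.HandlerFor(prometheus.Gatherers{
		appRegistry,                // app controller metrics
		prometheus.DefaultGatherer, // process and Go runtime metrics
		ctrl_metrics.Registry,      // controller-runtime workqueue metrics
	}, promhttp.HandlerOpts{}))

	_ = http.ListenAndServe(":8082", mux)
}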
@@ -2,6 +2,7 @@ package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -15,12 +16,15 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
|
||||
appinformer "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
|
||||
applister "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
)
|
||||
|
||||
const fakeApp = `
|
||||
@@ -140,6 +144,12 @@ var appFilter = func(obj interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Create a fake controller so we initialize the internal controller metrics.
|
||||
// https://github.com/kubernetes-sigs/controller-runtime/blob/4000e996a202917ad7d40f02ed8a2079a9ce25e9/pkg/internal/controller/metrics/metrics.go
|
||||
_, _ = controller.New("test-controller", nil, controller.Options{})
|
||||
}
|
||||
|
||||
func newFakeApp(fakeAppYAML string) *argoappv1.Application {
|
||||
var app argoappv1.Application
|
||||
err := yaml.Unmarshal([]byte(fakeAppYAML), &app)
|
||||
@@ -360,7 +370,7 @@ func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
assert.Contains(t, body, line, "expected metrics mismatch")
|
||||
assert.Contains(t, body, line, fmt.Sprintf("expected metrics mismatch for line: %s", line))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -443,3 +453,70 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa
|
||||
err = metricsServ.SetExpiration(time.Second)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestWorkqueueMetrics(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedMetrics := `
|
||||
# TYPE workqueue_adds_total counter
|
||||
workqueue_adds_total{name="test"}
|
||||
|
||||
# TYPE workqueue_depth gauge
|
||||
workqueue_depth{name="test"}
|
||||
|
||||
# TYPE workqueue_longest_running_processor_seconds gauge
|
||||
workqueue_longest_running_processor_seconds{name="test"}
|
||||
|
||||
# TYPE workqueue_queue_duration_seconds histogram
|
||||
|
||||
# TYPE workqueue_unfinished_work_seconds gauge
|
||||
workqueue_unfinished_work_seconds{name="test"}
|
||||
|
||||
# TYPE workqueue_work_duration_seconds histogram
|
||||
`
|
||||
workqueue.NewNamed("test")
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
|
||||
assert.NoError(t, err)
|
||||
rr := httptest.NewRecorder()
|
||||
metricsServ.Handler.ServeHTTP(rr, req)
|
||||
assert.Equal(t, rr.Code, http.StatusOK)
|
||||
body := rr.Body.String()
|
||||
log.Println(body)
|
||||
assertMetricsPrinted(t, expectedMetrics, body)
|
||||
}
|
||||
|
||||
func TestGoMetrics(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedMetrics := `
|
||||
# TYPE go_gc_duration_seconds summary
|
||||
go_gc_duration_seconds_sum
|
||||
go_gc_duration_seconds_count
|
||||
# TYPE go_goroutines gauge
|
||||
go_goroutines
|
||||
# TYPE go_info gauge
|
||||
go_info
|
||||
# TYPE go_memstats_alloc_bytes gauge
|
||||
go_memstats_alloc_bytes
|
||||
# TYPE go_memstats_sys_bytes gauge
|
||||
go_memstats_sys_bytes
|
||||
# TYPE go_threads gauge
|
||||
go_threads
|
||||
`
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
|
||||
assert.NoError(t, err)
|
||||
rr := httptest.NewRecorder()
|
||||
metricsServ.Handler.ServeHTTP(rr, req)
|
||||
assert.Equal(t, rr.Code, http.StatusOK)
|
||||
body := rr.Body.String()
|
||||
log.Println(body)
|
||||
assertMetricsPrinted(t, expectedMetrics, body)
|
||||
}
|
||||
|
||||
@@ -1,101 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
const (
|
||||
WorkQueueSubsystem = "workqueue"
|
||||
DepthKey = "depth"
|
||||
AddsKey = "adds_total"
|
||||
QueueLatencyKey = "queue_duration_seconds"
|
||||
WorkDurationKey = "work_duration_seconds"
|
||||
UnfinishedWorkKey = "unfinished_work_seconds"
|
||||
LongestRunningProcessorKey = "longest_running_processor_seconds"
|
||||
RetriesKey = "retries_total"
|
||||
)
|
||||
|
||||
var (
|
||||
depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: DepthKey,
|
||||
Help: "Current depth of workqueue",
|
||||
}, []string{"name"})
|
||||
|
||||
adds = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: AddsKey,
|
||||
Help: "Total number of adds handled by workqueue",
|
||||
}, []string{"name"})
|
||||
|
||||
latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: QueueLatencyKey,
|
||||
Help: "How long in seconds an item stays in workqueue before being requested",
|
||||
Buckets: []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180},
|
||||
}, []string{"name"})
|
||||
|
||||
workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: WorkDurationKey,
|
||||
Help: "How long in seconds processing an item from workqueue takes.",
|
||||
Buckets: []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180},
|
||||
}, []string{"name"})
|
||||
|
||||
unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: UnfinishedWorkKey,
|
||||
Help: "How many seconds of work has been done that " +
|
||||
"is in progress and hasn't been observed by work_duration. Large " +
|
||||
"values indicate stuck threads. One can deduce the number of stuck " +
|
||||
"threads by observing the rate at which this increases.",
|
||||
}, []string{"name"})
|
||||
|
||||
longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: LongestRunningProcessorKey,
|
||||
Help: "How many seconds has the longest running " +
|
||||
"processor for workqueue been running.",
|
||||
}, []string{"name"})
|
||||
|
||||
retries = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Subsystem: WorkQueueSubsystem,
|
||||
Name: RetriesKey,
|
||||
Help: "Total number of retries handled by workqueue",
|
||||
}, []string{"name"})
|
||||
)
|
||||
|
||||
func init() {
|
||||
workqueue.SetProvider(workqueueMetricsProvider{})
|
||||
}
|
||||
|
||||
type workqueueMetricsProvider struct{}
|
||||
|
||||
func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
|
||||
return depth.WithLabelValues(name)
|
||||
}
|
||||
|
||||
func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
|
||||
return adds.WithLabelValues(name)
|
||||
}
|
||||
|
||||
func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
|
||||
return latency.WithLabelValues(name)
|
||||
}
|
||||
|
||||
func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
|
||||
return workDuration.WithLabelValues(name)
|
||||
}
|
||||
|
||||
func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
|
||||
return unfinished.WithLabelValues(name)
|
||||
}
|
||||
|
||||
func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
|
||||
return longestRunningProcessor.WithLabelValues(name)
|
||||
}
|
||||
|
||||
func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
|
||||
return retries.WithLabelValues(name)
|
||||
}
|
||||
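The @@ -1,101 +0,0 @@ block above deletes Argo CD's hand-rolled workqueue metrics provider; the equivalent workqueue_* series now reach /metrics through the controller-runtime registry added to the gatherer list earlier in this diff. If a component still needed its own provider, the shape would be roughly the following sketch (not the removed file; it wires only the depth metric and no-ops the rest):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/util/workqueue"
)

var queueDepth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Subsystem: "workqueue",
	Name:      "depth",
	Help:      "Current depth of workqueue",
}, []string{"name"})

// noopMetric satisfies all workqueue metric interfaces while doing nothing.
type noopMetric struct{}

func (noopMetric) Inc()            {}
func (noopMetric) Dec()            {}
func (noopMetric) Set(float64)     {}
func (noopMetric) Observe(float64) {}

// sketchProvider reports only queue depth and ignores the other metrics.
type sketchProvider struct{}

func (sketchProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
	return queueDepth.WithLabelValues(name)
}
func (sketchProvider) NewAddsMetric(string) workqueue.CounterMetric           { return noopMetric{} }
func (sketchProvider) NewLatencyMetric(string) workqueue.HistogramMetric      { return noopMetric{} }
func (sketchProvider) NewWorkDurationMetric(string) workqueue.HistogramMetric { return noopMetric{} }
func (sketchProvider) NewUnfinishedWorkSecondsMetric(string) workqueue.SettableGaugeMetric {
	return noopMetric{}
}
func (sketchProvider) NewLongestRunningProcessorSecondsMetric(string) workqueue.SettableGaugeMetric {
	return noopMetric{}
}
func (sketchProvider) NewRetriesMetric(string) workqueue.CounterMetric { return noopMetric{} }

func main() {
	prometheus.MustRegister(queueDepth)
	workqueue.SetProvider(sketchProvider{})
	_ = workqueue.NewNamed("example") // this queue's depth now flows through the provider
}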
185
controller/sharding/cache.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package sharding
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type ClusterShardingCache interface {
|
||||
Init(clusters *v1alpha1.ClusterList)
|
||||
Add(c *v1alpha1.Cluster)
|
||||
Delete(clusterServer string)
|
||||
Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster)
|
||||
IsManagedCluster(c *v1alpha1.Cluster) bool
|
||||
GetDistribution() map[string]int
|
||||
}
|
||||
|
||||
type ClusterSharding struct {
|
||||
Shard int
|
||||
Replicas int
|
||||
Shards map[string]int
|
||||
Clusters map[string]*v1alpha1.Cluster
|
||||
lock sync.RWMutex
|
||||
getClusterShard DistributionFunction
|
||||
}
|
||||
|
||||
func NewClusterSharding(_ db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
|
||||
log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
|
||||
clusterSharding := &ClusterSharding{
|
||||
Shard: shard,
|
||||
Replicas: replicas,
|
||||
Shards: make(map[string]int),
|
||||
Clusters: make(map[string]*v1alpha1.Cluster),
|
||||
}
|
||||
distributionFunction := NoShardingDistributionFunction()
|
||||
if replicas > 1 {
|
||||
log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
|
||||
distributionFunction = GetDistributionFunction(clusterSharding.GetClusterAccessor(), shardingAlgorithm, replicas)
|
||||
} else {
|
||||
log.Info("Processing all cluster shards")
|
||||
}
|
||||
clusterSharding.getClusterShard = distributionFunction
|
||||
return clusterSharding
|
||||
}
|
||||
|
||||
// IsManagedCluster returns whether or not the cluster should be processed by a given shard.
|
||||
func (s *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
|
||||
return true
|
||||
}
|
||||
clusterShard := 0
|
||||
if shard, ok := s.Shards[c.Server]; ok {
|
||||
clusterShard = shard
|
||||
} else {
|
||||
log.Warnf("The cluster %s has no assigned shard.", c.Server)
|
||||
}
|
||||
log.Debugf("Checking if cluster %s with clusterShard %d should be processed by shard %d", c.Server, clusterShard, s.Shard)
|
||||
return clusterShard == s.Shard
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
|
||||
for _, c := range clusters.Items {
|
||||
cluster := c
|
||||
newClusters[c.Server] = &cluster
|
||||
}
|
||||
sharding.Clusters = newClusters
|
||||
sharding.updateDistribution()
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) Add(c *v1alpha1.Cluster) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
|
||||
old, ok := sharding.Clusters[c.Server]
|
||||
sharding.Clusters[c.Server] = c
|
||||
if !ok || hasShardingUpdates(old, c) {
|
||||
sharding.updateDistribution()
|
||||
} else {
|
||||
log.Debugf("Skipping sharding distribution update. Cluster already added")
|
||||
}
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) Delete(clusterServer string) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
if _, ok := sharding.Clusters[clusterServer]; ok {
|
||||
delete(sharding.Clusters, clusterServer)
|
||||
delete(sharding.Shards, clusterServer)
|
||||
sharding.updateDistribution()
|
||||
}
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster) {
|
||||
sharding.lock.Lock()
|
||||
defer sharding.lock.Unlock()
|
||||
|
||||
if _, ok := sharding.Clusters[oldCluster.Server]; ok && oldCluster.Server != newCluster.Server {
|
||||
delete(sharding.Clusters, oldCluster.Server)
|
||||
delete(sharding.Shards, oldCluster.Server)
|
||||
}
|
||||
sharding.Clusters[newCluster.Server] = newCluster
|
||||
if hasShardingUpdates(oldCluster, newCluster) {
|
||||
sharding.updateDistribution()
|
||||
} else {
|
||||
log.Debugf("Skipping sharding distribution update. No relevant changes")
|
||||
}
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) GetDistribution() map[string]int {
|
||||
sharding.lock.RLock()
|
||||
defer sharding.lock.RUnlock()
|
||||
shards := sharding.Shards
|
||||
|
||||
distribution := make(map[string]int, len(shards))
|
||||
for k, v := range shards {
|
||||
distribution[k] = v
|
||||
}
|
||||
return distribution
|
||||
}
|
||||
|
||||
func (sharding *ClusterSharding) updateDistribution() {
|
||||
for k, c := range sharding.Clusters {
|
||||
shard := 0
|
||||
if c.Shard != nil {
|
||||
requestedShard := int(*c.Shard)
|
||||
if requestedShard < sharding.Replicas {
|
||||
shard = requestedShard
|
||||
} else {
|
||||
log.Warnf("Specified cluster shard (%d) for cluster: %s is greater than the number of available shard (%d). Using shard 0.", requestedShard, c.Server, sharding.Replicas)
|
||||
}
|
||||
} else {
|
||||
shard = sharding.getClusterShard(c)
|
||||
}
|
||||
|
||||
existingShard, ok := sharding.Shards[k]
|
||||
if ok && existingShard != shard {
|
||||
log.Infof("Cluster %s has changed shard from %d to %d", k, existingShard, shard)
|
||||
} else if !ok {
|
||||
log.Infof("Cluster %s has been assigned to shard %d", k, shard)
|
||||
} else {
|
||||
log.Debugf("Cluster %s has not changed shard", k)
|
||||
}
|
||||
sharding.Shards[k] = shard
|
||||
}
|
||||
}
|
||||
|
||||
// hasShardingUpdates returns true if the sharding distribution has explicitly changed
|
||||
func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
|
||||
if old == nil || new == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// returns true if the cluster id has changed because some sharding algorithms depend on it.
|
||||
if old.ID != new.ID {
|
||||
return true
|
||||
}
|
||||
|
||||
if old.Server != new.Server {
|
||||
return true
|
||||
}
|
||||
|
||||
// return false if the shard field has not been modified
|
||||
if old.Shard == nil && new.Shard == nil {
|
||||
return false
|
||||
}
|
||||
return old.Shard == nil || new.Shard == nil || int64(*old.Shard) != int64(*new.Shard)
|
||||
}
|
||||
|
||||
func (d *ClusterSharding) GetClusterAccessor() clusterAccessor {
|
||||
return func() []*v1alpha1.Cluster {
|
||||
// no need to lock, as this is only called from the updateDistribution function
|
||||
clusters := make([]*v1alpha1.Cluster, 0, len(d.Clusters))
|
||||
for _, c := range d.Clusters {
|
||||
clusters = append(clusters, c)
|
||||
}
|
||||
return clusters
|
||||
}
|
||||
}
|
||||
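Taken together, controller/sharding/cache.go gives each controller replica an in-memory view of the cluster-to-shard assignment. A small usage sketch follows (cluster data is made up; nil is passed for the db argument because this revision of the constructor ignores it):

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	// Shard 0 of 2 replicas, using the default (legacy) algorithm.
	cache := sharding.NewClusterSharding(nil, 0, 2, common.DefaultShardingAlgorithm)

	// Seed the cache with the known clusters.
	cache.Init(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
		{ID: "1", Server: "https://kubernetes.default.svc"},
		{ID: "2", Server: "https://127.0.0.1:6443"},
	}})

	// Ask whether this replica should reconcile a given cluster.
	fmt.Println(cache.IsManagedCluster(&v1alpha1.Cluster{ID: "1", Server: "https://kubernetes.default.svc"}))

	// Inspect the current server-to-shard assignment.
	fmt.Println(cache.GetDistribution())
}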
475
controller/sharding/cache_test.go
Normal file
@@ -0,0 +1,475 @@
|
||||
package sharding
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func setupTestSharding(shard int, replicas int) *ClusterSharding {
|
||||
shardingAlgorithm := "legacy" // we are using the legacy algorithm as it is deterministic based on the cluster id which is easier to test
|
||||
db := &dbmocks.ArgoDB{}
|
||||
return NewClusterSharding(db, shard, replicas, shardingAlgorithm).(*ClusterSharding)
|
||||
}
|
||||
|
||||
func TestNewClusterSharding(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
sharding := setupTestSharding(shard, replicas)
|
||||
|
||||
assert.NotNil(t, sharding)
|
||||
assert.Equal(t, shard, sharding.Shard)
|
||||
assert.Equal(t, replicas, sharding.Replicas)
|
||||
assert.NotNil(t, sharding.Shards)
|
||||
assert.NotNil(t, sharding.Clusters)
|
||||
}
|
||||
|
||||
func TestClusterSharding_Add(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
sharding := setupTestSharding(shard, replicas)
|
||||
|
||||
clusterA := &v1alpha1.Cluster{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
}
|
||||
|
||||
sharding.Add(clusterA)
|
||||
|
||||
clusterB := v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
}
|
||||
|
||||
sharding.Add(&clusterB)
|
||||
|
||||
distribution := sharding.GetDistribution()
|
||||
|
||||
assert.Contains(t, sharding.Clusters, clusterA.Server)
|
||||
assert.Contains(t, sharding.Clusters, clusterB.Server)
|
||||
|
||||
clusterDistribution, ok := distribution[clusterA.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 1, clusterDistribution)
|
||||
|
||||
myClusterDistribution, ok := distribution[clusterB.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 0, myClusterDistribution)
|
||||
|
||||
assert.Equal(t, 2, len(distribution))
|
||||
}
|
||||
|
||||
func TestClusterSharding_AddRoundRobin_Redistributes(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
|
||||
db := &dbmocks.ArgoDB{}
|
||||
|
||||
sharding := NewClusterSharding(db, shard, replicas, "round-robin").(*ClusterSharding)
|
||||
|
||||
clusterA := &v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
}
|
||||
sharding.Add(clusterA)
|
||||
|
||||
clusterB := v1alpha1.Cluster{
|
||||
ID: "3",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
}
|
||||
sharding.Add(&clusterB)
|
||||
|
||||
distributionBefore := sharding.GetDistribution()
|
||||
|
||||
assert.Contains(t, sharding.Clusters, clusterA.Server)
|
||||
assert.Contains(t, sharding.Clusters, clusterB.Server)
|
||||
|
||||
clusterDistributionA, ok := distributionBefore[clusterA.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 0, clusterDistributionA)
|
||||
|
||||
clusterDistributionB, ok := distributionBefore[clusterB.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 1, clusterDistributionB)
|
||||
|
||||
assert.Equal(t, 2, len(distributionBefore))
|
||||
|
||||
clusterC := v1alpha1.Cluster{
|
||||
ID: "2",
|
||||
Server: "https://1.1.1.1",
|
||||
}
|
||||
sharding.Add(&clusterC)
|
||||
|
||||
distributionAfter := sharding.GetDistribution()
|
||||
|
||||
assert.Contains(t, sharding.Clusters, clusterA.Server)
|
||||
assert.Contains(t, sharding.Clusters, clusterB.Server)
|
||||
assert.Contains(t, sharding.Clusters, clusterC.Server)
|
||||
|
||||
clusterDistributionA, ok = distributionAfter[clusterA.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 0, clusterDistributionA)
|
||||
|
||||
clusterDistributionC, ok := distributionAfter[clusterC.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 1, clusterDistributionC) // will be assigned to shard 1 because the .ID is smaller then the "B" cluster
|
||||
|
||||
clusterDistributionB, ok = distributionAfter[clusterB.Server]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 0, clusterDistributionB) // will be reassigned to shard 0 because the .ID is bigger than the "C" cluster
|
||||
}
|
||||
|
||||
func TestClusterSharding_Delete(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
sharding := setupTestSharding(shard, replicas)
|
||||
|
||||
sharding.Init(
|
||||
&v1alpha1.ClusterList{
|
||||
Items: []v1alpha1.Cluster{
|
||||
{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
sharding.Delete("https://kubernetes.default.svc")
|
||||
distribution := sharding.GetDistribution()
|
||||
assert.Equal(t, 1, len(distribution))
|
||||
}
|
||||
|
||||
func TestClusterSharding_Update(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
sharding := setupTestSharding(shard, replicas)
|
||||
|
||||
sharding.Init(
|
||||
&v1alpha1.ClusterList{
|
||||
Items: []v1alpha1.Cluster{
|
||||
{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
distributionBefore := sharding.GetDistribution()
|
||||
assert.Equal(t, 2, len(distributionBefore))
|
||||
|
||||
distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 0, distributionA)
|
||||
|
||||
sharding.Update(&v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
}, &v1alpha1.Cluster{
|
||||
ID: "4",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
})
|
||||
|
||||
distributionAfter := sharding.GetDistribution()
|
||||
assert.Equal(t, 2, len(distributionAfter))
|
||||
|
||||
distributionA, ok = distributionAfter["https://kubernetes.default.svc"]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 1, distributionA)
|
||||
}
|
||||
|
||||
func TestClusterSharding_UpdateServerName(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
sharding := setupTestSharding(shard, replicas)
|
||||
|
||||
sharding.Init(
|
||||
&v1alpha1.ClusterList{
|
||||
Items: []v1alpha1.Cluster{
|
||||
{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
distributionBefore := sharding.GetDistribution()
|
||||
assert.Equal(t, 2, len(distributionBefore))
|
||||
|
||||
distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 0, distributionA)
|
||||
|
||||
sharding.Update(&v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
}, &v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://server2",
|
||||
})
|
||||
|
||||
distributionAfter := sharding.GetDistribution()
|
||||
assert.Equal(t, 2, len(distributionAfter))
|
||||
|
||||
_, ok = distributionAfter["https://kubernetes.default.svc"]
|
||||
assert.False(t, ok) // the old server name should not be present anymore
|
||||
|
||||
_, ok = distributionAfter["https://server2"]
|
||||
assert.True(t, ok) // the new server name should be present
|
||||
}
|
||||
|
||||
func TestClusterSharding_IsManagedCluster(t *testing.T) {
|
||||
replicas := 2
|
||||
sharding0 := setupTestSharding(0, replicas)
|
||||
|
||||
sharding0.Init(
|
||||
&v1alpha1.ClusterList{
|
||||
Items: []v1alpha1.Cluster{
|
||||
{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
},
|
||||
{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
assert.True(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
}))
|
||||
|
||||
assert.False(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
}))
|
||||
|
||||
sharding1 := setupTestSharding(1, replicas)
|
||||
|
||||
sharding1.Init(
|
||||
&v1alpha1.ClusterList{
|
||||
Items: []v1alpha1.Cluster{
|
||||
{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
},
|
||||
{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
assert.False(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
}))
|
||||
|
||||
assert.True(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
}))
|
||||
|
||||
}
|
||||
|
||||
func TestClusterSharding_ClusterShardOfResourceShouldNotBeChanged(t *testing.T) {
|
||||
shard := 1
|
||||
replicas := 2
|
||||
sharding := setupTestSharding(shard, replicas)
|
||||
|
||||
Int64Ptr := func(i int64) *int64 {
|
||||
return &i
|
||||
}
|
||||
|
||||
clusterWithNil := &v1alpha1.Cluster{
|
||||
ID: "2",
|
||||
Server: "https://127.0.0.1:6443",
|
||||
Shard: nil,
|
||||
}
|
||||
|
||||
clusterWithValue := &v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(1),
|
||||
}
|
||||
|
||||
clusterWithToBigValue := &v1alpha1.Cluster{
|
||||
ID: "3",
|
||||
Server: "https://1.1.1.1",
|
||||
Shard: Int64Ptr(999), // shard value is explicitly bigger than the number of replicas
|
||||
}
|
||||
|
||||
sharding.Init(
|
||||
&v1alpha1.ClusterList{
|
||||
Items: []v1alpha1.Cluster{
|
||||
*clusterWithNil,
|
||||
*clusterWithValue,
|
||||
*clusterWithToBigValue,
|
||||
},
|
||||
},
|
||||
)
|
||||
distribution := sharding.GetDistribution()
|
||||
assert.Equal(t, 3, len(distribution))
|
||||
|
||||
assert.Nil(t, sharding.Clusters[clusterWithNil.Server].Shard)
|
||||
|
||||
assert.NotNil(t, sharding.Clusters[clusterWithValue.Server].Shard)
|
||||
assert.Equal(t, int64(1), *sharding.Clusters[clusterWithValue.Server].Shard)
|
||||
assert.Equal(t, 1, distribution[clusterWithValue.Server])
|
||||
|
||||
assert.NotNil(t, sharding.Clusters[clusterWithToBigValue.Server].Shard)
|
||||
assert.Equal(t, int64(999), *sharding.Clusters[clusterWithToBigValue.Server].Shard)
|
||||
assert.Equal(t, 0, distribution[clusterWithToBigValue.Server]) // will be assigned to shard 0 because the value is bigger than the number of replicas
|
||||
}
|
||||
|
||||
func TestHasShardingUpdates(t *testing.T) {
|
||||
Int64Ptr := func(i int64) *int64 {
|
||||
return &i
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
old *v1alpha1.Cluster
|
||||
new *v1alpha1.Cluster
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "No updates",
|
||||
old: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(1),
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(1),
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Updates",
|
||||
old: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(1),
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Old is nil",
|
||||
old: nil,
|
||||
new: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "New is nil",
|
||||
old: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
new: nil,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Both are nil",
|
||||
old: nil,
|
||||
new: nil,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Both shards are nil",
|
||||
old: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: nil,
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: nil,
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Old shard is nil",
|
||||
old: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: nil,
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "New shard is nil",
|
||||
old: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: nil,
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Cluster ID has changed",
|
||||
old: &v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
ID: "2",
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Server has changed",
|
||||
old: &v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://server1",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
new: &v1alpha1.Cluster{
|
||||
ID: "1",
|
||||
Server: "https://server2",
|
||||
Shard: Int64Ptr(2),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.Equal(t, tc.expected, hasShardingUpdates(tc.old, tc.new))
|
||||
})
|
||||
}
|
||||
}
|
||||
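The sharding tests further down rely on a getClusterAccessor helper, a closure returning the known clusters. Its definition is not shown in this excerpt; a plausible minimal form (hypothetical helper, assuming the same signature used at the call sites) is:

package sharding

import "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

// getClusterAccessor wraps a fixed slice of clusters in the clusterAccessor
// closure expected by GetDistributionFunction and RoundRobinDistributionFunction.
func getClusterAccessor(clusters []v1alpha1.Cluster) clusterAccessor {
	return func() []*v1alpha1.Cluster {
		result := make([]*v1alpha1.Cluster, 0, len(clusters))
		for i := range clusters {
			result = append(result, &clusters[i])
		}
		return result
	}
}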
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -20,6 +21,7 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
log "github.com/sirupsen/logrus"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -40,6 +42,7 @@ const ShardControllerMappingKey = "shardControllerMapping"
|
||||
|
||||
type DistributionFunction func(c *v1alpha1.Cluster) int
|
||||
type ClusterFilterFunction func(c *v1alpha1.Cluster) bool
|
||||
type clusterAccessor func() []*v1alpha1.Cluster
|
||||
|
||||
// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap.
|
||||
// It also stores the heartbeat of last synced time of the application controller.
|
||||
@@ -53,8 +56,7 @@ type shardApplicationControllerMapping struct {
|
||||
// and returns whether or not the cluster should be processed by a given shard. It calls the distributionFunction
|
||||
// to determine which shard will process the cluster, and if the given shard is equal to the calculated shard
|
||||
// the function will return true.
|
||||
func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, shard int) ClusterFilterFunction {
|
||||
replicas := db.GetApplicationControllerReplicas()
|
||||
func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, replicas, shard int) ClusterFilterFunction {
|
||||
return func(c *v1alpha1.Cluster) bool {
|
||||
clusterShard := 0
|
||||
if c != nil && c.Shard != nil {
|
||||
@@ -73,14 +75,14 @@ func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, s
|
||||
|
||||
// GetDistributionFunction returns which DistributionFunction should be used based on the passed algorithm and
|
||||
// the current data.
|
||||
func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) DistributionFunction {
|
||||
log.Infof("Using filter function: %s", shardingAlgorithm)
|
||||
distributionFunction := LegacyDistributionFunction(db)
|
||||
func GetDistributionFunction(clusters clusterAccessor, shardingAlgorithm string, replicasCount int) DistributionFunction {
|
||||
log.Debugf("Using filter function: %s", shardingAlgorithm)
|
||||
distributionFunction := LegacyDistributionFunction(replicasCount)
|
||||
switch shardingAlgorithm {
|
||||
case common.RoundRobinShardingAlgorithm:
|
||||
distributionFunction = RoundRobinDistributionFunction(db)
|
||||
distributionFunction = RoundRobinDistributionFunction(clusters, replicasCount)
|
||||
case common.LegacyShardingAlgorithm:
|
||||
distributionFunction = LegacyDistributionFunction(db)
|
||||
distributionFunction = LegacyDistributionFunction(replicasCount)
|
||||
default:
|
||||
log.Warnf("distribution type %s is not supported, defaulting to %s", shardingAlgorithm, common.DefaultShardingAlgorithm)
|
||||
}
|
||||
@@ -92,15 +94,21 @@ func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) Distributio
|
||||
// is lightweight and can be distributed easily, however, it does not ensure a homogeneous distribution as
|
||||
// some shards may get assigned more clusters than others. It is the legacy distribution function that is
|
||||
// kept for compatibility reasons
|
||||
func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
replicas := db.GetApplicationControllerReplicas()
|
||||
func LegacyDistributionFunction(replicas int) DistributionFunction {
|
||||
return func(c *v1alpha1.Cluster) int {
|
||||
if replicas == 0 {
|
||||
log.Debugf("Replicas count is : %d, returning -1", replicas)
|
||||
return -1
|
||||
}
|
||||
if c == nil {
|
||||
log.Debug("In-cluster: returning 0")
|
||||
return 0
|
||||
}
|
||||
// if Shard is manually set and the assigned value is lower than the number of replicas,
|
||||
// then its value is returned otherwise it is the default calculated value
|
||||
if c.Shard != nil && int(*c.Shard) < replicas {
|
||||
return int(*c.Shard)
|
||||
}
|
||||
id := c.ID
|
||||
log.Debugf("Calculating cluster shard for cluster id: %s", id)
|
||||
if id == "" {
|
||||
@@ -121,14 +129,19 @@ func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
// This function ensures a homogeneous distribution: each shard gets assigned the same number of
|
||||
// clusters +/-1, but with the drawback of a reshuffling of clusters across shards in case of some changes
|
||||
// in the cluster list
|
||||
func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
replicas := db.GetApplicationControllerReplicas()
|
||||
|
||||
func RoundRobinDistributionFunction(clusters clusterAccessor, replicas int) DistributionFunction {
|
||||
return func(c *v1alpha1.Cluster) int {
|
||||
if replicas > 0 {
|
||||
if c == nil { // in-cluster does not necessarily have a secret assigned. So we are receiving a nil cluster here.
|
||||
return 0
|
||||
}
|
||||
// if Shard is manually set and the assigned value is lower than the number of replicas,
|
||||
// then its value is returned otherwise it is the default calculated value
|
||||
if c.Shard != nil && int(*c.Shard) < replicas {
|
||||
return int(*c.Shard)
|
||||
} else {
|
||||
clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(db)
|
||||
clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(clusters)
|
||||
clusterIndex, ok := clusterIndexdByClusterIdMap[c.ID]
|
||||
if !ok {
|
||||
log.Warnf("Cluster with id=%s not found in cluster map.", c.ID)
|
||||
@@ -144,6 +157,12 @@ func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
}
|
||||
}
|
||||
|
||||
// NoShardingDistributionFunction returns a DistributionFunction that will process all clusters on shard 0
|
||||
// the function is created for API compatibility purposes and is not supposed to be activated.
|
||||
func NoShardingDistributionFunction() DistributionFunction {
|
||||
return func(c *v1alpha1.Cluster) int { return 0 }
|
||||
}
|
||||
|
||||
// InferShard extracts the shard index based on its hostname.
|
||||
func InferShard() (int, error) {
|
||||
hostname, err := osHostnameFunction()
|
||||
@@ -152,33 +171,29 @@ func InferShard() (int, error) {
|
||||
}
|
||||
parts := strings.Split(hostname, "-")
|
||||
if len(parts) == 0 {
|
||||
return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
|
||||
log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
|
||||
return 0, nil
|
||||
}
|
||||
shard, err := strconv.Atoi(parts[len(parts)-1])
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
|
||||
log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
|
||||
return 0, nil
|
||||
}
|
||||
return int(shard), nil
|
||||
}
|
||||
|
||||
func getSortedClustersList(db db.ArgoDB) []v1alpha1.Cluster {
|
||||
ctx := context.Background()
|
||||
clustersList, dbErr := db.ListClusters(ctx)
|
||||
if dbErr != nil {
|
||||
log.Warnf("Error while querying clusters list from database: %v", dbErr)
|
||||
return []v1alpha1.Cluster{}
|
||||
}
|
||||
clusters := clustersList.Items
|
||||
func getSortedClustersList(getCluster clusterAccessor) []*v1alpha1.Cluster {
|
||||
clusters := getCluster()
|
||||
sort.Slice(clusters, func(i, j int) bool {
|
||||
return clusters[i].ID < clusters[j].ID
|
||||
})
|
||||
return clusters
|
||||
}
|
||||
|
||||
func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
|
||||
clusters := getSortedClustersList(db)
|
||||
func createClusterIndexByClusterIdMap(getCluster clusterAccessor) map[string]int {
|
||||
clusters := getSortedClustersList(getCluster)
|
||||
log.Debugf("ClustersList has %d items", len(clusters))
|
||||
clusterById := make(map[string]v1alpha1.Cluster)
|
||||
clusterById := make(map[string]*v1alpha1.Cluster)
|
||||
clusterIndexedByClusterId := make(map[string]int)
|
||||
for i, cluster := range clusters {
|
||||
log.Debugf("Adding cluster with id=%s and name=%s to cluster's map", cluster.ID, cluster.Name)
|
||||
@@ -193,8 +208,7 @@ func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
|
||||
// The function takes the shard number from the environment variable (default value -1, if not set) and passes it to this function.
|
||||
// If the shard value passed to this function is -1, that is, the shard was not set as an environment variable,
|
||||
// we default the shard number to 0 for computing the default config map.
|
||||
func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
|
||||
|
||||
func GetOrUpdateShardFromConfigMap(kubeClient kubernetes.Interface, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
|
||||
hostname, err := osHostnameFunction()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
@@ -351,3 +365,59 @@ func getDefaultShardMappingData(replicas int) []shardApplicationControllerMappin
|
||||
}
|
||||
return shardMappingData
|
||||
}
|
||||
|
||||
func GetClusterSharding(kubeClient kubernetes.Interface, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) (ClusterShardingCache, error) {
|
||||
var replicasCount int
|
||||
if enableDynamicClusterDistribution {
|
||||
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
|
||||
|
||||
// if the app controller deployment is not found when dynamic cluster distribution is enabled, error out
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: %v", err)
|
||||
}
|
||||
|
||||
if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
|
||||
replicasCount = int(*appControllerDeployment.Spec.Replicas)
|
||||
} else {
|
||||
return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment replica count")
|
||||
}
|
||||
|
||||
} else {
|
||||
replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
}
|
||||
shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
|
||||
if replicasCount > 1 {
|
||||
// check for shard mapping using configmap if application-controller is a deployment
|
||||
// else use existing logic to infer shard from pod name if application-controller is a statefulset
|
||||
if enableDynamicClusterDistribution {
|
||||
var err error
|
||||
// retry 3 times if we find a conflict while updating shard mapping configMap.
|
||||
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
|
||||
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
|
||||
shardNumber, err = GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
|
||||
if err != nil && !kubeerrors.IsConflict(err) {
|
||||
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
|
||||
break
|
||||
}
|
||||
log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
if shardNumber < 0 {
|
||||
var err error
|
||||
shardNumber, err = InferShard()
|
||||
errors.CheckError(err)
|
||||
}
|
||||
if shardNumber > replicasCount {
|
||||
log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
|
||||
shardNumber = 0
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Info("Processing all cluster shards")
|
||||
shardNumber = 0
|
||||
}
|
||||
db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
|
||||
return NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm), nil
|
||||
}
|
||||
|
||||
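GetClusterSharding above resolves the number of replicas either from the controller Deployment (when dynamic cluster distribution is enabled) or from the common.EnvControllerReplicas environment variable, and only then negotiates the shard number. A condensed sketch of that decision (error handling, retries, and the shard negotiation are elided; not the actual function):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// resolveReplicas returns how many application-controller replicas to shard across.
func resolveReplicas(kubeClient kubernetes.Interface, namespace, deployName string, dynamic bool, envReplicas int) (int, error) {
	if !dynamic {
		// Static mode: trust the replica count exported via the environment.
		return envReplicas, nil
	}
	// Dynamic mode: read the live replica count from the controller Deployment.
	deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(context.Background(), deployName, metav1.GetOptions{})
	if err != nil {
		return 0, fmt.Errorf("failed to get app controller deployment: %w", err)
	}
	if deploy.Spec.Replicas == nil {
		return 0, fmt.Errorf("app controller deployment has no replica count")
	}
	return int(*deploy.Spec.Replicas), nil
}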
@@ -1,36 +1,44 @@
|
||||
package sharding
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestGetShardByID_NotEmptyID(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "4"}))
|
||||
}
|
||||
|
||||
func TestGetShardByID_EmptyID(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(replicasCount)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, 0, shard)
|
||||
}
|
||||
|
||||
@@ -38,7 +46,7 @@ func TestGetShardByID_NoReplicas(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(0)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
@@ -46,16 +54,16 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunction(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(0)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
distributionFunction := RoundRobinDistributionFunction(clusters, 0)
|
||||
assert.Equal(t, -1, distributionFunction(nil))
|
||||
assert.Equal(t, -1, distributionFunction(&cluster1))
|
||||
assert.Equal(t, -1, distributionFunction(&cluster2))
|
||||
@@ -65,137 +73,112 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *tes
|
||||
}
|
||||
|
||||
func TestGetClusterFilterDefault(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
clusterAccessor, _, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 2
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterLegacy(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterUnknown(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "unknown")
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, "unknown"), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := GetDistributionFunction(clusterAccessor, "unknown", replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(nil))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "5")
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 5
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
filter := GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, 0, filter(nil))
|
||||
assert.Equal(t, 4, filter(&cluster1))
|
||||
assert.Equal(t, 1, filter(&cluster2))
|
||||
assert.Equal(t, 2, filter(&cluster3))
|
||||
assert.Equal(t, 2, filter(&cluster4))
|
||||
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
|
||||
cluster5 := &v1alpha1.Cluster{ID: "5", Shard: &fixedShard}
|
||||
clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
|
||||
filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(cluster5))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
cluster5.Shard = &fixedShard
|
||||
clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
|
||||
filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(nil))
|
||||
assert.False(t, filter(&cluster1))
|
||||
assert.True(t, filter(&cluster2))
|
||||
assert.False(t, filter(&cluster3))
|
||||
assert.True(t, filter(&cluster4))
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "4")
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 4
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
|
||||
filter := GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, filter(nil), 0)
|
||||
assert.Equal(t, filter(&cluster1), 0)
|
||||
assert.Equal(t, filter(&cluster2), 1)
|
||||
assert.Equal(t, filter(&cluster3), 2)
|
||||
assert.Equal(t, filter(&cluster4), 3)
|
||||
|
||||
// a cluster with a fixed shard should be processed by the specified exact
|
||||
// same shard unless the specified shard index is greater than the number of replicas.
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
var fixedShard int64 = 1
|
||||
cluster5 := v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
|
||||
clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
|
||||
clusterAccessor = getClusterAccessor(clusters)
|
||||
filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
|
||||
assert.Equal(t, int(fixedShard), filter(&cluster5))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterLegacyHash(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "hash")
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&cluster1))
|
||||
assert.True(t, filter(&cluster2))
|
||||
assert.False(t, filter(&cluster3))
|
||||
assert.True(t, filter(&cluster4))
|
||||
|
||||
// a cluster with a fixed shard should be processed by the specified exact
|
||||
// same shard unless the specified shard index is greater than the number of replicas.
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) {
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
shardIndex := 1
db.On("GetApplicationControllerReplicas").Return(2)

t.Run("legacy", func(t *testing.T) {
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
assert.False(t, shardShouldProcessCluster(&cluster1))
assert.True(t, shardShouldProcessCluster(&cluster2))
assert.False(t, shardShouldProcessCluster(&cluster3))
assert.True(t, shardShouldProcessCluster(&cluster4))
assert.False(t, shardShouldProcessCluster(nil))
})

t.Run("roundrobin", func(t *testing.T) {
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
assert.False(t, shardShouldProcessCluster(&cluster1))
assert.True(t, shardShouldProcessCluster(&cluster2))
assert.False(t, shardShouldProcessCluster(&cluster3))
assert.True(t, shardShouldProcessCluster(&cluster4))
assert.False(t, shardShouldProcessCluster(nil))
})
cluster5 = v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
clusters = []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
clusterAccessor = getClusterAccessor(clusters)
filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
}

func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()

t.Run("replicas set to 1", func(t *testing.T) {
db.On("GetApplicationControllerReplicas").Return(1).Once()
distributionFunction := RoundRobinDistributionFunction(db)
replicasCount := 1
db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 0, distributionFunction(&cluster2))
@@ -205,8 +188,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
})

t.Run("replicas set to 2", func(t *testing.T) {
db.On("GetApplicationControllerReplicas").Return(2).Once()
distributionFunction := RoundRobinDistributionFunction(db)
replicasCount := 2
db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -216,8 +200,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
})

t.Run("replicas set to 3", func(t *testing.T) {
db.On("GetApplicationControllerReplicas").Return(3).Once()
distributionFunction := RoundRobinDistributionFunction(db)
replicasCount := 3
db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -233,17 +218,19 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumber
// Initial tests where showing that under 1024 clusters, execution time was around 400ms
// and for 4096 clusters, execution time was under 9s
// The other implementation was giving almost linear time of 400ms up to 10'000 clusters
db := dbmocks.ArgoDB{}
clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{}}
clusterPointers := []*v1alpha1.Cluster{}
for i := 0; i < 2048; i++ {
cluster := createCluster(fmt.Sprintf("cluster-%d", i), fmt.Sprintf("%d", i))
clusterList.Items = append(clusterList.Items, cluster)
clusterPointers = append(clusterPointers, &cluster)
}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
db.On("GetApplicationControllerReplicas").Return(2)
distributionFunction := RoundRobinDistributionFunction(&db)
for i, c := range clusterList.Items {
assert.Equal(t, i%2, distributionFunction(&c))
replicasCount := 2
t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
_, db, _, _, _, _, _ := createTestClusters()
clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
db.On("GetApplicationControllerReplicas").Return(replicasCount)
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
for i, c := range clusterPointers {
assert.Equal(t, i%2, distributionFunction(c))
}
}

@@ -256,12 +243,15 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
cluster5 := createCluster("cluster5", "5")
cluster6 := createCluster("cluster6", "6")

clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
clusterAccessor := getClusterAccessor(clusters)

clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)

// Test with replicas set to 2
db.On("GetApplicationControllerReplicas").Return(2)
distributionFunction := RoundRobinDistributionFunction(&db)
replicasCount := 2
db.On("GetApplicationControllerReplicas").Return(replicasCount)
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -272,17 +262,20 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde

// Now, the database knows cluster6. Shard should be assigned a proper shard
clusterList.Items = append(clusterList.Items, cluster6)
distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
assert.Equal(t, 1, distributionFunction(&cluster6))

// Now, we remove the last added cluster, it should be unassigned as well
clusterList.Items = clusterList.Items[:len(clusterList.Items)-1]
distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
assert.Equal(t, -1, distributionFunction(&cluster6))
}

func TestGetShardByIndexModuloReplicasCountDistributionFunction(t *testing.T) {
db, cluster1, cluster2, _, _, _ := createTestClusters()
db.On("GetApplicationControllerReplicas").Return(2)
distributionFunction := RoundRobinDistributionFunction(db)
clusters, db, cluster1, cluster2, _, _, _ := createTestClusters()
replicasCount := 2
db.On("GetApplicationControllerReplicas").Return(replicasCount)
distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)

// Test that the function returns the correct shard for cluster1 and cluster2
expectedShardForCluster1 := 0
@@ -315,14 +308,14 @@ func TestInferShard(t *testing.T) {

osHostnameFunction = func() (string, error) { return "exampleshard", nil }
_, err = InferShard()
assert.NotNil(t, err)
assert.Nil(t, err)

osHostnameFunction = func() (string, error) { return "example-shard", nil }
_, err = InferShard()
assert.NotNil(t, err)
assert.Nil(t, err)
}

func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
func createTestClusters() (clusterAccessor, *dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
db := dbmocks.ArgoDB{}
cluster1 := createCluster("cluster1", "1")
cluster2 := createCluster("cluster2", "2")
@@ -330,10 +323,27 @@ func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster,
cluster4 := createCluster("cluster4", "4")
cluster5 := createCluster("cluster5", "5")

clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}

db.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
cluster1, cluster2, cluster3, cluster4, cluster5,
}}, nil)
return &db, cluster1, cluster2, cluster3, cluster4, cluster5
return getClusterAccessor(clusters), &db, cluster1, cluster2, cluster3, cluster4, cluster5
}

func getClusterAccessor(clusters []v1alpha1.Cluster) clusterAccessor {
// Convert the array to a slice of pointers
clusterPointers := getClusterPointers(clusters)
clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
return clusterAccessor
}

func getClusterPointers(clusters []v1alpha1.Cluster) []*v1alpha1.Cluster {
var clusterPointers []*v1alpha1.Cluster
for i := range clusters {
clusterPointers = append(clusterPointers, &clusters[i])
}
return clusterPointers
}

func createCluster(name string, id string) v1alpha1.Cluster {
@@ -676,3 +686,187 @@ func Test_getOrUpdateShardNumberForController(t *testing.T) {
})
}
}

func TestGetClusterSharding(t *testing.T) {
|
||||
IntPtr := func(i int32) *int32 {
|
||||
return &i
|
||||
}
|
||||
|
||||
deployment := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.DefaultApplicationControllerName,
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: IntPtr(1),
|
||||
},
|
||||
}
|
||||
|
||||
deploymentMultiReplicas := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "argocd-application-controller-multi-replicas",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: IntPtr(3),
|
||||
},
|
||||
}
|
||||
|
||||
objects := append([]runtime.Object{}, deployment, deploymentMultiReplicas)
|
||||
kubeclientset := kubefake.NewSimpleClientset(objects...)
|
||||
|
||||
settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "argocd", settings.WithRepoOrClusterChangedHandler(func() {
|
||||
}))
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
useDynamicSharding bool
|
||||
envsSetter func(t *testing.T)
|
||||
cleanup func()
|
||||
expectedShard int
|
||||
expectedReplicas int
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
name: "Default sharding with statefulset",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "1")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: false,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 1,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Default sharding with deployment",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: true,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 1,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Default sharding with deployment and multiple replicas",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: true,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 3,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Statefulset multiple replicas",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "3")
|
||||
osHostnameFunction = func() (string, error) { return "example-shard-3", nil }
|
||||
},
|
||||
cleanup: func() {
|
||||
osHostnameFunction = os.Hostname
|
||||
},
|
||||
useDynamicSharding: false,
|
||||
expectedShard: 3,
|
||||
expectedReplicas: 3,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Explicit shard with statefulset and 1 replica",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "1")
|
||||
t.Setenv(common.EnvControllerShard, "3")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: false,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 1,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Explicit shard with statefulset and 2 replica - and to high shard",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
t.Setenv(common.EnvControllerShard, "3")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: false,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 2,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Explicit shard with statefulset and 2 replica",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
t.Setenv(common.EnvControllerShard, "1")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: false,
|
||||
expectedShard: 1,
|
||||
expectedReplicas: 2,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Explicit shard with deployment",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerShard, "3")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: true,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 1,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Explicit shard with deployment and multiple replicas will read from configmap",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
|
||||
t.Setenv(common.EnvControllerShard, "3")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: true,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 3,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "Dynamic sharding but missing deployment",
|
||||
envsSetter: func(t *testing.T) {
|
||||
t.Setenv(common.EnvAppControllerName, "missing-deployment")
|
||||
},
|
||||
cleanup: func() {},
|
||||
useDynamicSharding: true,
|
||||
expectedShard: 0,
|
||||
expectedReplicas: 1,
|
||||
expectedErr: fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: deployments.apps \"missing-deployment\" not found"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.envsSetter(t)
|
||||
defer tc.cleanup()
|
||||
shardingCache, err := GetClusterSharding(kubeclientset, settingsMgr, "round-robin", tc.useDynamicSharding)
|
||||
|
||||
if shardingCache != nil {
|
||||
clusterSharding := shardingCache.(*ClusterSharding)
|
||||
assert.Equal(t, tc.expectedShard, clusterSharding.Shard)
|
||||
assert.Equal(t, tc.expectedReplicas, clusterSharding.Replicas)
|
||||
}
|
||||
|
||||
if tc.expectedErr != nil {
|
||||
if err != nil {
|
||||
assert.Equal(t, tc.expectedErr.Error(), err.Error())
|
||||
} else {
|
||||
t.Errorf("Expected error %v but got nil", tc.expectedErr)
|
||||
}
|
||||
} else {
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package sharding
import (
"fmt"
"math"
"strconv"
"testing"

"github.com/argoproj/argo-cd/v2/common"
@@ -22,9 +23,11 @@ func TestLargeShuffle(t *testing.T) {
clusterList.Items = append(clusterList.Items, cluster)
}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
clusterAccessor := getClusterAccessor(clusterList.Items)
// Test with replicas set to 256
t.Setenv(common.EnvControllerReplicas, "256")
distributionFunction := RoundRobinDistributionFunction(&db)
replicasCount := 256
t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
for i, c := range clusterList.Items {
assert.Equal(t, i%2567, distributionFunction(&c))
}
@@ -44,10 +47,11 @@ func TestShuffle(t *testing.T) {

clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5, cluster6}}
db.On("ListClusters", mock.Anything).Return(clusterList, nil)

clusterAccessor := getClusterAccessor(clusterList.Items)
// Test with replicas set to 3
t.Setenv(common.EnvControllerReplicas, "3")
distributionFunction := RoundRobinDistributionFunction(&db)
replicasCount := 3
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
assert.Equal(t, 0, distributionFunction(nil))
assert.Equal(t, 0, distributionFunction(&cluster1))
assert.Equal(t, 1, distributionFunction(&cluster2))

14
docs/faq.md
@@ -36,7 +36,7 @@ which might cause health check to return `Progressing` state instead of `Healthy`
As a workaround Argo CD allows providing [health check](operator-manual/health.md) customization which overrides default
behavior.

If you are using Traefik for your Ingress, you can update the Traefik config to publish the loadBalancer IP using [publishedservice](https://doc.traefik.io/traefik/providers/kubernetes-ingress/#publishedservice), which will resolve this issue.

```yaml
providers:
@@ -97,7 +97,7 @@ data:

## After deploying my Helm application with Argo CD I cannot see it with `helm ls` and other Helm commands

When deploying a Helm application Argo CD is using Helm
only as a template mechanism. It runs `helm template` and
then deploys the resulting manifests on the cluster instead of doing `helm install`. This means that you cannot use any Helm command
to view/verify the application. It is fully managed by Argo CD.
@@ -140,15 +140,15 @@ Argo CD automatically sets the `app.kubernetes.io/instance` label and uses it to
If the tool does this too, this causes confusion. You can change this label by setting
the `application.instanceLabelKey` value in the `argocd-cm`. We recommend that you use `argocd.argoproj.io/instance`.

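For illustration only (this snippet is not part of the linked FAQ), changing the tracking label comes down to a one-key edit of `argocd-cm`, for example:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  # use the recommended Argo CD-specific label for resource tracking
  application.instanceLabelKey: argocd.argoproj.io/instance
```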
!!! note
    When you make this change your applications will become out of sync and will need re-syncing.

See [#1482](https://github.com/argoproj/argo-cd/issues/1482).

## How often does Argo CD check for changes to my Git or Helm repository?

The default polling interval is 3 minutes (180 seconds).
You can change the setting by updating the `timeout.reconciliation` value in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.
The default polling interval is 3 minutes (180 seconds) with a configurable jitter.
You can change the setting by updating the `timeout.reconciliation` value and the `timeout.reconciliation.jitter` in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, Argo CD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.

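As a minimal sketch of the two settings mentioned above (the values are examples, not defaults):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
data:
  # poll Git every 5 minutes instead of the 3-minute default
  timeout.reconciliation: 300s
  # add up to 1 minute of random jitter to spread out refreshes
  timeout.reconciliation.jitter: 60s
```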
## Why Are My Resource Limits `Out Of Sync`?
|
||||
@@ -250,7 +250,7 @@ There are two parts to the message:
|
||||
|
||||
> map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something]
|
||||
|
||||
You'll want to identify the keys that are duplicated -- you can focus on the first part, as each duplicated key will appear, once for each of its value with its value in the first list. The second list is really just
|
||||
You'll want to identify the keys that are duplicated -- you can focus on the first part, as each duplicated key will appear, once for each of its value with its value in the first list. The second list is really just
|
||||
|
||||
`]`
|
||||
|
||||
@@ -259,7 +259,7 @@ There are two parts to the message:
|
||||
This includes all of the keys. It's included for debugging purposes -- you don't need to pay much attention to it. It will give you a hint about the precise location in the list for the duplicated keys:
|
||||
|
||||
> map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**]
|
||||
|
||||
|
||||
`]`
|
||||
|
||||
In this case, the duplicated keys have been **emphasized** to help you identify the problematic keys. Many editors have the ability to highlight all instances of a string, using such an editor can help with such problems.
|
||||
|
||||
@@ -111,16 +111,15 @@ In this example, the ApplicationSet controller will generate an `Application` re

## Template Patch

Templating is only available on string type. However, some uses cases may require to apply templating on other types.
Templating is only available on string type. However, some use cases may require applying templating on other types.

Example:

- Set the automated sync policy
- Switch prune boolean to true
- Add multiple helm value files

Argo CD has a `templatePatch` feature to allow advanced templating. It supports both json and yaml.
- Conditionally set the automated sync policy.
- Conditionally switch prune boolean to `true`.
- Add multiple helm value files from a list.

The `templatePatch` feature enables advanced templating, with support for `json` and `yaml`.

```yaml
apiVersion: argoproj.io/v1alpha1
@@ -174,3 +173,6 @@ spec:

The `spec.project` field is not supported in `templatePatch`. If you need to change the project, you can use the
`spec.project` field in the `template` field.

!!! important
    When writing a `templatePatch`, you're crafting a patch. So, if the patch includes an empty `spec: # nothing in here`, it will effectively clear out existing fields. See [#17040](https://github.com/argoproj/argo-cd/issues/17040) for an example of this behavior.

@@ -316,6 +316,12 @@ data:
  # published to the repository. Reconciliation by timeout is disabled if timeout is set to 0. Three minutes by default.
  # > Note: argocd-repo-server deployment must be manually restarted after changing the setting.
  timeout.reconciliation: 180s
  # With a large number of applications, the periodic refresh for each application can cause a spike in the refresh queue
  # and can cause a spike in the repo-server component. To avoid this, you can set a jitter to the sync timeout, which will
  # spread out the refreshes and give time to the repo-server to catch up. The jitter is the maximum duration that can be
  # added to the sync timeout. So, if the sync timeout is 3 minutes and the jitter is 1 minute, then the actual timeout will
  # be between 3 and 4 minutes. Disabled when the value is 0, defaults to 0.
  timeout.reconciliation.jitter: 0

  # cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
  cluster.inClusterEnabled: "true"

@@ -90,6 +90,9 @@ data:
  server.k8sclient.retry.max: "0"
  # The initial backoff delay on the first retry attempt in ms. Subsequent retries will double this backoff time up to a maximum threshold
  server.k8sclient.retry.base.backoff: "100"
  # Semicolon-separated list of content types allowed on non-GET requests. Set an empty string to allow all. Be aware
  # that allowing content types besides application/json may make your API more vulnerable to CSRF attacks.
  server.api.content.types: "application/json"

  # Set the logging format. One of: text|json (default "text")
  server.log.format: "text"

@@ -17,16 +17,10 @@ which does not require a restart of the application controller pods.

## Enabling Dynamic Distribution of Clusters

This feature is disabled by default while it is in alpha. To enable it, you must set the environment `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to true when running the Application Controller.

In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize
overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. The
dynamic distribution code automatically kicks in when the controller is deployed as a Deployment.
This feature is disabled by default while it is in alpha. In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. Also, you must set the environment `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to true when running the Application Controller as a deployment.

!!! important
    The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of
    this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes
    to avoid issues.
    The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes to avoid issues.

Note the introduction of new environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME`. The environment variable is explained in [working of Dynamic Distribution Heartbeat Process](#working-of-dynamic-distribution)

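As an illustrative sketch only (verify names and structure against `manifests/ha/base/controller-deployment/`), enabling the feature amounts to running the controller as a Deployment with the environment variable set:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: argocd-application-controller
spec:
  template:
    spec:
      containers:
        - name: argocd-application-controller
          env:
            # opt in to dynamic cluster distribution
            - name: ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION
              value: "true"
            # heartbeat interval used by the dynamic distribution process
            # (value shown here is only an example; see the heartbeat section)
            - name: ARGOCD_CONTROLLER_HEARTBEAT_TIME
              value: "10s"
```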
@@ -57,7 +57,7 @@ performance. For performance reasons the controller monitors and caches only the
preferred version into a version of the resource stored in Git. If `kubectl convert` fails because the conversion is not supported then the controller falls back to Kubernetes API query which slows down
reconciliation. In this case, we advise to use the preferred resource version in Git.

* The controller polls Git every 3m by default. You can change this duration using the `timeout.reconciliation` setting in the `argocd-cm` ConfigMap. The value of `timeout.reconciliation` is a duration string e.g `60s`, `1m`, `1h` or `1d`.
* The controller polls Git every 3m by default. You can change this duration using the `timeout.reconciliation` and `timeout.reconciliation.jitter` setting in the `argocd-cm` ConfigMap. The value of the fields is a duration string e.g `60s`, `1m`, `1h` or `1d`.

* If the controller is managing too many clusters and uses too much memory then you can shard clusters across multiple
  controller replicas. To enable sharding, increase the number of replicas in `argocd-application-controller` `StatefulSet`
@@ -244,30 +244,41 @@ spec:
  # ...
```

### Application Sync Timeout & Jitter

Argo CD has a timeout for application syncs. It will trigger a refresh for each application periodically when the timeout expires.
With a large number of applications, this will cause a spike in the refresh queue and can cause a spike to the repo-server component. To avoid this, you can set a jitter to the sync timeout which will spread out the refreshes and give time to the repo-server to catch up.

The jitter is the maximum duration that can be added to the sync timeout, so if the sync timeout is 5 minutes and the jitter is 1 minute, then the actual timeout will be between 5 and 6 minutes.

To configure the jitter you can set the following environment variables:

* `ARGOCD_RECONCILIATION_JITTER` - The jitter to apply to the sync timeout. Disabled when value is 0. Defaults to 0.
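A minimal sketch of setting this on the application controller container (the duration-style value is an assumption; adapt it to how you deploy the controller):

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: argocd-application-controller
spec:
  template:
    spec:
      containers:
        - name: argocd-application-controller
          env:
            # add up to one minute of random delay to each periodic refresh
            - name: ARGOCD_RECONCILIATION_JITTER
              value: "60s"
```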
## Rate Limiting Application Reconciliations

To prevent high controller resource usage or sync loops caused either due to misbehaving apps or other environment specific factors,
we can configure rate limits on the workqueues used by the application controller. There are two types of rate limits that can be configured:

* Global rate limits
* Per item rate limits

The final rate limiter uses a combination of both and calculates the final backoff as `max(globalBackoff, perItemBackoff)`.

### Global rate limits

This is enabled by default, it is a simple bucket based rate limiter that limits the number of items that can be queued per second.
This is useful to prevent a large number of apps from being queued at the same time.

This is disabled by default, it is a simple bucket based rate limiter that limits the number of items that can be queued per second.
This is useful to prevent a large number of apps from being queued at the same time.

To configure the bucket limiter you can set the following environment variables:

* `WORKQUEUE_BUCKET_SIZE` - The number of items that can be queued in a single burst. Defaults to 500.
* `WORKQUEUE_BUCKET_QPS` - The number of items that can be queued per second. Defaults to 50.
* `WORKQUEUE_BUCKET_QPS` - The number of items that can be queued per second. Defaults to MaxFloat64, which disables the limiter.

### Per item rate limits

This by default returns a fixed base delay/backoff value but can be configured to return exponential values, read further to understand it's working.
Per item rate limiter limits the number of times a particular item can be queued. This is based on exponential backoff where the backoff time for an item keeps increasing exponentially
This by default returns a fixed base delay/backoff value but can be configured to return exponential values.
Per item rate limiter limits the number of times a particular item can be queued. This is based on exponential backoff where the backoff time for an item keeps increasing exponentially
if it is queued multiple times in a short period, but the backoff is reset automatically if a configured `cool down` period has elapsed since the last time the item was queued.

To configure the per item limiter you can set the following environment variables:
@@ -277,16 +288,16 @@ To configure the per item limiter you can set the following environment variable
* `WORKQUEUE_MAX_DELAY_NS` : The max delay in nanoseconds, this is the max backoff limit. Defaults to 3 * 10^9 (=3s)
* `WORKQUEUE_BACKOFF_FACTOR` : The backoff factor, this is the factor by which the backoff is increased for each retry. Defaults to 1.5

The formula used to calculate the backoff time for an item, where `numRequeue` is the number of times the item has been queued
and `lastRequeueTime` is the time at which the item was last queued:

- When `WORKQUEUE_FAILURE_COOLDOWN_NS` != 0 :

```
backoff = time.Since(lastRequeueTime) >= WORKQUEUE_FAILURE_COOLDOWN_NS ?
            WORKQUEUE_BASE_DELAY_NS :
            min(
                WORKQUEUE_MAX_DELAY_NS,
                WORKQUEUE_BASE_DELAY_NS * WORKQUEUE_BACKOFF_FACTOR ^ (numRequeue)
            )
```

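For a concrete sense of the numbers: with the defaults above (base delay 1ms, backoff factor 1.5), an item requeued 10 times within the cooldown window backs off for roughly min(WORKQUEUE_MAX_DELAY_NS, 1ms * 1.5^10) ≈ 58ms, while an item whose last requeue is older than `WORKQUEUE_FAILURE_COOLDOWN_NS` drops straight back to the 1ms base delay.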
@@ -67,7 +67,7 @@ metadata:
  name: argocd-cmd-params-cm
data:
  application.namespaces: app-team-one, app-team-two
  notificationscontroller.selfservice.enabled: true
  notificationscontroller.selfservice.enabled: "true"
```

To use this feature, you can deploy configmap named `argocd-notifications-cm` and possibly a secret `argocd-notifications-secret` in the namespace where the Argo CD application lives.

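A minimal sketch of such a namespaced configmap (the namespace, trigger and template names below are made up for illustration):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-notifications-cm
  namespace: app-team-one
data:
  trigger.on-sync-succeeded: |
    - when: app.status.operationState.phase in ['Succeeded']
      send: [app-sync-succeeded]
  template.app-sync-succeeded: |
    message: Application {{.app.metadata.name}} has been successfully synced.
```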
@@ -12,8 +12,8 @@ To be able to send notifications with argocd-notifications you have to create an
8. Give your integration a name, copy the "API key" and save it somewhere for later
9. Make sure the checkboxes for "Create and Update Access" and "enable" are selected, disable the other checkboxes to remove unnecessary permissions
10. Click "Save Integration" at the bottom
11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the us/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (european api).
12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the opsgenie integration in the `argocd-notifications-secret` secret.
11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the US/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API).
12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the Opsgenie integration in the `argocd-notifications-secret` secret.

```yaml
apiVersion: v1

@@ -1,17 +1,17 @@
# Pagerduty
# PagerDuty

## Parameters

The Pagerduty notification service is used to create pagerduty incidents and requires specifying the following settings:
The PagerDuty notification service is used to create PagerDuty incidents and requires specifying the following settings:

* `pagerdutyToken` - the pagerduty auth token
* `pagerdutyToken` - the PagerDuty auth token
* `from` - email address of a valid user associated with the account making the request.
* `serviceID` - The ID of the resource.

## Example

The following snippet contains sample Pagerduty service configuration:
The following snippet contains sample PagerDuty service configuration:

```yaml
apiVersion: v1
@@ -35,7 +35,7 @@ data:

## Template

[Notification templates](../templates.md) support specifying subject for pagerduty notifications:
[Notification templates](../templates.md) support specifying subject for PagerDuty notifications:

```yaml
apiVersion: v1
@@ -62,5 +62,5 @@ apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  annotations:
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "<serviceID for Pagerduty>"
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "<serviceID for PagerDuty>"
```

@@ -74,5 +74,5 @@ apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  annotations:
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "<serviceID for Pagerduty>"
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "<serviceID for PagerDuty>"
```

@@ -17,6 +17,7 @@ argocd-application-controller [flags]
|
||||
```
|
||||
--app-hard-resync int Time period in seconds for application hard resync.
|
||||
--app-resync int Time period in seconds for application resync. (default 180)
|
||||
--app-resync-jitter int Maximum time period in seconds to add as a delay jitter for application resync.
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from
|
||||
--as string Username to impersonate for the operation
|
||||
@@ -76,7 +77,7 @@ argocd-application-controller [flags]
|
||||
--username string Username for basic authentication to the API server
|
||||
--wq-backoff-factor float Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5 (default 1.5)
|
||||
--wq-basedelay-ns duration Set Workqueue Per Item Rate Limiter Base Delay duration in nanoseconds, default 1000000 (1ms) (default 1ms)
|
||||
--wq-bucket-qps int Set Workqueue Rate Limiter Bucket QPS, default 50 (default 50)
|
||||
--wq-bucket-qps float Set Workqueue Rate Limiter Bucket QPS, default set to MaxFloat64 which disables the bucket limiter (default 1.7976931348623157e+308)
|
||||
--wq-bucket-size int Set Workqueue Rate Limiter Bucket Size, default 500 (default 500)
|
||||
--wq-cooldown-ns duration Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)
|
||||
--wq-maxdelay-ns duration Set Workqueue Per Item Rate Limiter Max Delay duration in nanoseconds, default 1000000000 (1s) (default 1s)
|
||||
|
||||
@@ -26,6 +26,7 @@ argocd-server [flags]
|
||||
|
||||
```
|
||||
--address string Listen on given address (default "0.0.0.0")
|
||||
--api-content-types string Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty. (default "application/json")
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces where application resources can be managed in
|
||||
--as string Username to impersonate for the operation
|
||||
|
||||
@@ -1,6 +1,5 @@
| Argo CD version | Kubernetes versions |
|-----------------|---------------------|
| 2.7 | v1.26, v1.25, v1.24, v1.23 |
| 2.6 | v1.24, v1.23, v1.22 |
| 2.5 | v1.24, v1.23, v1.22 |
| 2.10 | v1.28, v1.27, v1.26, v1.25 |
| 2.9 | v1.28, v1.27, v1.26, v1.25 |
| 2.8 | v1.27, v1.26, v1.25, v1.24 |

@@ -10,3 +10,7 @@ removed.

To avoid unexpected behavior, follow the [client-side to server-side resource upgrade guide](https://kubernetes.io/docs/reference/using-api/server-side-apply/#upgrading-from-client-side-apply-to-server-side-apply)
before enabling `managedNamespaceMetadata` on an existing namespace.

## Upgraded Helm Version

Note that bundled Helm version has been upgraded from 3.13.2 to 3.14.3.

@@ -62,6 +62,6 @@ argocd admin cluster namespaces my-cluster
|
||||
* [argocd admin cluster generate-spec](argocd_admin_cluster_generate-spec.md) - Generate declarative config for a cluster
|
||||
* [argocd admin cluster kubeconfig](argocd_admin_cluster_kubeconfig.md) - Generates kubeconfig for the specified cluster
|
||||
* [argocd admin cluster namespaces](argocd_admin_cluster_namespaces.md) - Print information namespaces which Argo CD manages in each cluster.
|
||||
* [argocd admin cluster shards](argocd_admin_cluster_shards.md) - Print information about each controller shard and portion of Kubernetes resources it is responsible for.
|
||||
* [argocd admin cluster shards](argocd_admin_cluster_shards.md) - Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.
|
||||
* [argocd admin cluster stats](argocd_admin_cluster_stats.md) - Prints information cluster statistics and inferred shard number
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## argocd admin cluster shards
|
||||
|
||||
Print information about each controller shard and portion of Kubernetes resources it is responsible for.
|
||||
Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.
|
||||
|
||||
```
|
||||
argocd admin cluster shards [flags]
|
||||
@@ -43,6 +43,7 @@ argocd admin cluster shards [flags]
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--shard int Cluster shard filter (default -1)
|
||||
--sharding-method string Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] (default "legacy")
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
|
||||
@@ -57,6 +57,7 @@ argocd admin cluster stats target-cluster
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--shard int Cluster shard filter (default -1)
|
||||
--sharding-method string Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] (default "legacy")
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
|
||||
@@ -56,7 +56,13 @@ Application.
Add the following entry in the argocd-cmd-params-cm configmap:

```
controller.diff.server.side: "true"
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
data:
  controller.diff.server.side: "true"
...
```

Note: It is necessary to restart the `argocd-application-controller`

@@ -106,6 +106,37 @@ spec:
|
||||
namespace: default
|
||||
```
|
||||
|
||||
## Components
|
||||
Kustomize [components](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/components.md) encapsulate both resources and patches together. They provide a powerful way to modularize and reuse configuration in Kubernetes applications.
|
||||
|
||||
Outside of Argo CD, to utilize components, you must add the following to the `kustomization.yaml` that the Application references. For example:
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
...
|
||||
components:
|
||||
- ../component
|
||||
```
|
||||
|
||||
With support added for components in `v2.10.0`, you can now reference a component directly in the Application:
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: application-kustomize-components
|
||||
spec:
|
||||
...
|
||||
source:
|
||||
path: examples/application-kustomize-components/base
|
||||
repoURL: https://github.com/my-user/my-repo
|
||||
targetRevision: main
|
||||
|
||||
# This!
|
||||
kustomize:
|
||||
components:
|
||||
- ../component # relative to the kustomization.yaml (`source.path`).
|
||||
```
|
||||
|
||||
## Private Remote Bases
|
||||
|
||||
If you have remote bases that are either (a) HTTPS and need username/password (b) SSH and need SSH private key, then they'll inherit that from the app's repo.
|
||||
|
||||
@@ -8,9 +8,9 @@ and after a Sync operation. Hooks can also be run if a Sync operation fails at a
* Using a `Sync` hook to orchestrate a complex deployment requiring more sophistication than the
  Kubernetes rolling update strategy.
* Using a `PostSync` hook to run integration and health checks after a deployment.
* Using a `SyncFail` hook to run clean-up or finalizer logic if a Sync operation fails. _`SyncFail` hooks are only available starting in v1.2_
* Using a `PostDelete` hook to run clean-up or finalizer logic after an all Application resources are deleted. Please note that
  `PostDelete` hooks are only deleted if delete policy matches to the aggregated deletion hooks status and not garbage collected after the application is deleted.
* Using a `SyncFail` hook to run clean-up or finalizer logic if a Sync operation fails.
* Using a `PostDelete` hook to run clean-up or finalizer logic after all Application resources are deleted. Please note that
  `PostDelete` hooks are only deleted if the delete policy matches the aggregated deletion hooks status and not garbage collected after the application is deleted.

## Usage

@@ -39,7 +39,8 @@ The following hooks are defined:
| `Sync` | Executes after all `PreSync` hooks completed and were successful, at the same time as the application of the manifests. |
| `Skip` | Indicates to Argo CD to skip the application of the manifest. |
| `PostSync` | Executes after all `Sync` hooks completed and were successful, a successful application, and all resources in a `Healthy` state. |
| `SyncFail` | Executes when the sync operation fails. _Available starting in v1.2_ |
| `SyncFail` | Executes when the sync operation fails. |
| `PostDelete` | Executes after all Application resources are deleted. _Available starting in v2.10._ |
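For illustration (this example is not part of the original page), a `PostSync` hook is an ordinary manifest carrying the standard hook annotations; the image and command below are placeholders:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  generateName: integration-test-
  annotations:
    # run after the sync succeeded and all resources are Healthy
    argocd.argoproj.io/hook: PostSync
    # remove the Job once it has completed successfully
    argocd.argoproj.io/hook-delete-policy: HookSucceeded
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: integration-test
          image: curlimages/curl
          args: ["-f", "http://my-app.example.svc.cluster.local/healthz"]
```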
### Generate Name

63
go.mod
@@ -13,10 +13,10 @@ require (
|
||||
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d
|
||||
github.com/alicebob/miniredis/v2 v2.30.4
|
||||
github.com/antonmedv/expr v1.15.2
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20231218194513-aba38192fb16
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20240122213038-792124280fcc
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604
|
||||
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1
|
||||
github.com/aws/aws-sdk-go v1.44.317
|
||||
github.com/aws/aws-sdk-go v1.50.8
|
||||
github.com/bmatcuk/doublestar/v4 v4.6.0
|
||||
github.com/bombsimon/logrusr/v2 v2.0.1
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.6.0
|
||||
@@ -28,7 +28,8 @@ require (
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/gfleury/go-bitbucket-v1 v0.0.0-20220301131131-8e7ed04b843e
|
||||
github.com/go-git/go-git/v5 v5.8.1
|
||||
github.com/go-git/go-git/v5 v5.11.0
|
||||
github.com/go-jose/go-jose/v3 v3.0.1
|
||||
github.com/go-logr/logr v1.3.0
|
||||
github.com/go-openapi/loads v0.21.2
|
||||
github.com/go-openapi/runtime v0.26.0
|
||||
@@ -80,19 +81,18 @@ require (
|
||||
go.opentelemetry.io/otel v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
|
||||
go.opentelemetry.io/otel/sdk v1.21.0
|
||||
golang.org/x/crypto v0.14.0
|
||||
golang.org/x/crypto v0.16.0
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
|
||||
golang.org/x/oauth2 v0.11.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/term v0.13.0
|
||||
golang.org/x/term v0.15.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d
|
||||
google.golang.org/grpc v1.59.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
gopkg.in/square/go-jose.v2 v2.6.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.26.11
|
||||
k8s.io/apiextensions-apiserver v0.26.4
|
||||
k8s.io/apiextensions-apiserver v0.26.10
|
||||
k8s.io/apimachinery v0.26.11
|
||||
k8s.io/apiserver v0.26.11
|
||||
k8s.io/client-go v0.26.11
|
||||
@@ -103,7 +103,7 @@ require (
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
|
||||
layeh.com/gopher-json v0.0.0-20190114024228-97fed8db8427
|
||||
oras.land/oras-go/v2 v2.3.0
|
||||
sigs.k8s.io/controller-runtime v0.14.6
|
||||
sigs.k8s.io/controller-runtime v0.14.7
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
@@ -114,19 +114,20 @@ require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.25.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
|
||||
github.com/aws/smithy-go v1.19.0 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/google/s2a-go v0.1.4 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
|
||||
@@ -159,9 +160,8 @@ require (
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/PagerDuty/go-pagerduty v1.7.0 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
|
||||
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 // indirect
|
||||
github.com/acomagu/bufpipe v1.0.4 // indirect
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
@@ -184,8 +184,7 @@ require (
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.4.1 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.5.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.21.4 // indirect
|
||||
github.com/go-openapi/errors v0.20.3 // indirect
|
||||
@@ -243,7 +242,7 @@ require (
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
@@ -251,7 +250,7 @@ require (
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sergi/go-diff v1.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.1 // indirect
|
||||
github.com/slack-go/slack v0.12.2 // indirect
|
||||
github.com/spf13/cast v1.5.1 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
@@ -268,11 +267,11 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.17.0
|
||||
golang.org/x/sys v0.14.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/net v0.19.0
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0
|
||||
golang.org/x/tools v0.12.0 // indirect
|
||||
golang.org/x/tools v0.13.0 // indirect
|
||||
gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
gomodules.xyz/notify v0.1.1 // indirect
|
||||
|
||||
123
go.sum
@@ -657,8 +657,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/PagerDuty/go-pagerduty v1.7.0 h1:S1NcMKECxT5hJwV4VT+QzeSsSiv4oWl1s2821dUqG/8=
|
||||
github.com/PagerDuty/go-pagerduty v1.7.0/go.mod h1:PuFyJKRz1liIAH4h5KVXVD18Obpp1ZXRdxHvmGXooro=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 h1:prBTRx78AQnXzivNT9Crhu564W/zPPr3ibSlpT9xKcE=
|
||||
@@ -668,8 +668,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
|
||||
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d h1:WtAMR0fPCOfK7TPGZ8ZpLLY18HRvL7XJ3xcs0wnREgo=
|
||||
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d/go.mod h1:WML6KOYjeU8N6YyusMjj2qRvaPNUEvrQvaxuFcMRFJY=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
|
||||
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
|
||||
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
|
||||
@@ -696,10 +694,10 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
|
||||
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20231218194513-aba38192fb16 h1:kR15L8UsSVr7oitABKU88msQirMT0/RO/KRla1jkq/s=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20231218194513-aba38192fb16/go.mod h1:gWE8uROi7hIkWGNAVM+8FWkMfo0vZ03SLx/aFw/DBzg=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9 h1:1lt0VXzmLK7Vv0kaeal3S6/JIfzPyBORkUWXhiqF3l0=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9/go.mod h1:E/vv4+by868m0mmflaRfGBmKBtAupoF+mmyfekP8QCk=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20240122213038-792124280fcc h1:Fv94Mi2WvtvPkEH5WoWC3iy/VoQRLeSsE0hyg0n2UkY=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20240122213038-792124280fcc/go.mod h1:gWE8uROi7hIkWGNAVM+8FWkMfo0vZ03SLx/aFw/DBzg=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604 h1:pMfBao6Vm1Ax0xGIp9BWEia2nKkccHwV0dTEdrsFOpo=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604/go.mod h1:TsyusmXQWIL0ST7YMRG/ered7WlWDmbmnPpXnS2LJmM=
|
||||
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1 h1:qsHwwOJ21K2Ao0xPju1sNuqphyMnMYkyB3ZLoLtxWpo=
|
||||
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1/go.mod h1:CZHlkyAD1/+FbEn6cB2DQTj48IoLGvEYsWEvtzP3238=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
@@ -715,35 +713,37 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU=
|
||||
github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go v1.50.8 h1:gY0WoOW+/Wz6XmYSgDH9ge3wnAevYDSQWPxxJvqAkP4=
|
||||
github.com/aws/aws-sdk-go v1.50.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.8 h1:lDpy0WM8AHsywOnVrOHaSMfpaiV2igOw8D7svkFkXVA=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.8/go.mod h1:5XCmmyutmzzgkpk/6NYTjeWb6lgo9N170m1j6pQkIBs=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 h1:vTrwTvv5qAwjWIGhZDSBH/oQHuIQjGmD232k01FUh6A=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.8/go.mod h1:lVa4OHbvgjVot4gmh1uouF1ubgexSCN92P6CJQpT0t8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 h1:tQoMg8i4nFAB70cJ4wiAYEiZRYo2P6uDmU2D6ys/igo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0/go.mod h1:jQhN5f4p3PALMNlUtfb/0wGIFlV7vGtJlPDVfxfNfPY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 h1:/2gzjhQowRLarkkBOGPXSRnb8sQ2RVsjdG1C/UliK/c=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 h1:Jfly6mRxk2ZOSlbCvZfKNS7TukSx1mIzhSsqZ/IGSZI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0/go.mod h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 h1:kOO++CYo50RcTFISESluhWEi5Prhg+gaSs4whWabiZU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.25.12 h1:mF4cMuNh/2G+d19nWnm1vJ/ak0qK6SbqF0KtSX9pxu0=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.25.12/go.mod h1:lOvvqtZP9p29GIjOTuA/76HiVk0c/s8qRcFRq2+E2uc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 h1:tRNrFDGRm81e6nTX5Q4CFblea99eAfm0dxXazGpLceU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7/go.mod h1:8GWUDux5Z2h6z2efAtr54RdHXtLm8sq7Rg85ZNY/CZM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
|
||||
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
|
||||
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
|
||||
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
@@ -850,8 +850,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0=
|
||||
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
|
||||
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
@@ -925,12 +925,12 @@ github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H
|
||||
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
|
||||
github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
|
||||
github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A=
|
||||
github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo=
|
||||
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
|
||||
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
|
||||
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
|
||||
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -1377,8 +1377,6 @@ github.com/malexdev/utfutil v0.0.0-20180510171754-00c8d4a8e7a8 h1:A6SLdFpRzUUF5v
|
||||
github.com/malexdev/utfutil v0.0.0-20180510171754-00c8d4a8e7a8/go.mod h1:UtpLyb/EupVKXF/N0b4NRe1DNg+QYJsnsHQ038romhM=
|
||||
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
|
||||
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
@@ -1498,8 +1496,9 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
|
||||
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
|
||||
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
|
||||
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
|
||||
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
|
||||
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
@@ -1625,8 +1624,8 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
|
||||
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
|
||||
github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
|
||||
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
|
||||
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ=
|
||||
@@ -1815,8 +1814,9 @@ golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0
|
||||
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
|
||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1961,8 +1961,9 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -2134,8 +2135,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -2149,8 +2150,9 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -2169,8 +2171,9 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -2260,8 +2263,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
|
||||
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
|
||||
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -2596,8 +2599,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/retry.v1 v1.0.3 h1:a9CArYczAVv6Qs6VGoLMio99GEs7kY9UzSF9+LD+iGs=
|
||||
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
@@ -2705,8 +2706,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
|
||||
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
|
||||
sigs.k8s.io/controller-runtime v0.14.7 h1:Vrnm2vk9ZFlRkXATHz0W0wXcqNl7kPat8q2JyxVy0Q8=
|
||||
sigs.k8s.io/controller-runtime v0.14.7/go.mod h1:ErTs3SJCOujNUnTz4AS+uh8hp6DHMo1gj6fFndJT1X8=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
@@ -0,0 +1 @@
f43e1c3387de24547506ab05d24e5309c0ce0b228c23bd8aa64e9ec4b8206651 helm-v3.14.0-linux-amd64.tar.gz
@@ -0,0 +1 @@
b29e61674731b15f6ad3d1a3118a99d3cc2ab25a911aad1b8ac8c72d5a9d2952 helm-v3.14.0-linux-arm64.tar.gz
@@ -0,0 +1 @@
f1f9d3561724863edd4c06d89acb2e2fd8ae0f1b72058ceb891fa1c346ce5dbc helm-v3.14.0-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
82298ef39936f1bef848959a29f77bff92d1309d8646657e3a7733702e81288c helm-v3.14.0-linux-s390x.tar.gz
@@ -0,0 +1 @@
75496ea824f92305ff7d28af37f4af57536bf5138399c824dff997b9d239dd42 helm-v3.14.1-linux-amd64.tar.gz
@@ -0,0 +1 @@
f865b8ad4228fd0990bbc5b50615eb6cb9eb31c9a9ca7238401ed897bbbe9033 helm-v3.14.1-linux-arm64.tar.gz
@@ -0,0 +1 @@
4d853ab8fe3462287c7272fbadd5f73531ecdd6fa0db37d31630e41ae1ae21de helm-v3.14.1-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
19bf07999c7244bfeb0fd27152919b9faa1148cf43910edbb98efa9150058a98 helm-v3.14.1-linux-s390x.tar.gz
@@ -0,0 +1 @@
0885a501d586c1e949e9b113bf3fb3290b0bbf74db9444a1d8c2723a143006a5 helm-v3.14.2-linux-amd64.tar.gz
@@ -0,0 +1 @@
c65d6a9557bb359abc2c0d26670de850b52327dc3976ad6f9e14c298ea3e1b61 helm-v3.14.2-linux-arm64.tar.gz
@@ -0,0 +1 @@
f3bc8582ff151e619cd285d9cdf9fef1c5733ee5522d8bed2ef680ef07f87223 helm-v3.14.2-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
7bda34aa26638e5116b31385f3b781172572175bf4c1ae00c87d8b154458ed94 helm-v3.14.2-linux-s390x.tar.gz
@@ -0,0 +1 @@
4d5d01a94c7d6b07e71690dc1988bf3229680284c87f4242d28c6f1cc99653be helm-v3.14.3-darwin-amd64.tar.gz
@@ -0,0 +1 @@
dff794152b62b7c1a9ff615d510f8657bcd7a3727c668e0d9d4955f70d5f7573 helm-v3.14.3-darwin-arm64.tar.gz
@@ -0,0 +1 @@
3c90f24e180f8c207b8a18e5ec82cb0fa49858a7a0a86e4ed52a98398681e00b helm-v3.14.3-linux-amd64.tar.gz
@@ -0,0 +1 @@
85e1573e76fa60af14ba7e9ec75db2129b6884203be866893fa0b3f7e41ccd5e helm-v3.14.3-linux-arm64.tar.gz
@@ -0,0 +1 @@
aab121ca470e2a502cda849a9b3e92eeb9a32e213b0f0a79a95a04e375d26ce7 helm-v3.14.3-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
d64fa8aced3244b549377741dc4e2db8109e5270c0723c11b547a9da5f99ad43 helm-v3.14.3-linux-s390x.tar.gz
@@ -11,7 +11,7 @@
# Use ./hack/installers/checksums/add-helm-checksums.sh and
# add-kustomize-checksums.sh to help download checksums.
###############################################################################
helm3_version=3.13.2
helm3_version=3.14.3
kubectl_version=1.17.8
kubectx_version=0.6.3
kustomize5_version=5.2.1
@@ -20,8 +20,6 @@ spec:
- args:
- /usr/local/bin/argocd-application-controller
env:
- name: ARGOCD_CONTROLLER_REPLICAS
value: "1"
- name: ARGOCD_RECONCILIATION_TIMEOUT
valueFrom:
configMapKeyRef:
@@ -34,6 +32,12 @@ spec:
name: argocd-cm
key: timeout.hard.reconciliation
optional: true
- name: ARGOCD_RECONCILIATION_JITTER
valueFrom:
configMapKeyRef:
key: timeout.reconciliation.jitter
name: argocd-cm
optional: true
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
valueFrom:
configMapKeyRef:
@@ -36,3 +36,11 @@ rules:
verbs:
- create
- list
- apiGroups:
- apps
resources:
- deployments
verbs:
- get
- list
- watch
@@ -35,6 +35,12 @@ spec:
name: argocd-cm
key: timeout.hard.reconciliation
optional: true
- name: ARGOCD_RECONCILIATION_JITTER
valueFrom:
configMapKeyRef:
key: timeout.reconciliation.jitter
name: argocd-cm
optional: true
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
valueFrom:
configMapKeyRef:
@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v2.10.5
resources:
- ./application-controller
- ./dex
@@ -25,136 +25,136 @@ spec:
|
||||
env:
|
||||
- name: ARGOCD_SERVER_INSECURE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.insecure
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.insecure
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_BASEHREF
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.basehref
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.basehref
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_ROOTPATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.rootpath
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.rootpath
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_LOGFORMAT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.log.format
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.log.format
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_LOG_LEVEL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.log.level
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.log.level
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_REPO_SERVER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: repo.server
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: repo.server
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_DEX_SERVER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.dex.server
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.dex.server
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_DISABLE_AUTH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.disable.auth
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.disable.auth
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_ENABLE_GZIP
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.enable.gzip
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.enable.gzip
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.repo.server.timeout.seconds
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.repo.server.timeout.seconds
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_X_FRAME_OPTIONS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.x.frame.options
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.x.frame.options
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_CONTENT_SECURITY_POLICY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.content.security.policy
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.content.security.policy
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_REPO_SERVER_PLAINTEXT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.repo.server.plaintext
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.repo.server.plaintext
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_REPO_SERVER_STRICT_TLS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.repo.server.strict.tls
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.repo.server.strict.tls
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_DEX_SERVER_PLAINTEXT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.dex.server.plaintext
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.dex.server.plaintext
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_DEX_SERVER_STRICT_TLS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.dex.server.strict.tls
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.dex.server.strict.tls
|
||||
optional: true
|
||||
- name: ARGOCD_TLS_MIN_VERSION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.tls.minversion
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.tls.minversion
|
||||
optional: true
|
||||
- name: ARGOCD_TLS_MAX_VERSION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.tls.maxversion
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.tls.maxversion
|
||||
optional: true
|
||||
- name: ARGOCD_TLS_CIPHERS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.tls.ciphers
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.tls.ciphers
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_CONNECTION_STATUS_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.connection.status.cache.expiration
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.connection.status.cache.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_OIDC_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.oidc.cache.expiration
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.oidc.cache.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_LOGIN_ATTEMPTS_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.login.attempts.expiration
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.login.attempts.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_STATIC_ASSETS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -163,16 +163,16 @@ spec:
|
||||
optional: true
|
||||
- name: ARGOCD_APP_STATE_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.app.state.cache.expiration
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.app.state.cache.expiration
|
||||
optional: true
|
||||
- name: REDIS_SERVER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.server
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.server
|
||||
optional: true
|
||||
- name: REDIS_COMPRESSION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -181,76 +181,82 @@ spec:
|
||||
optional: true
|
||||
- name: REDISDB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.db
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.db
|
||||
optional: true
|
||||
- name: ARGOCD_DEFAULT_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.default.cache.expiration
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.default.cache.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_MAX_COOKIE_NUMBER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.http.cookie.maxnumber
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.http.cookie.maxnumber
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_LISTEN_ADDRESS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.listen.address
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.listen.address
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_METRICS_LISTEN_ADDRESS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.metrics.listen.address
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.metrics.listen.address
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_OTLP_ADDRESS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.address
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.address
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_OTLP_INSECURE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.insecure
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.insecure
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_OTLP_HEADERS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.headers
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.headers
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: application.namespaces
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: application.namespaces
|
||||
optional: true
|
||||
- name: ARGOCD_SERVER_ENABLE_PROXY_EXTENSION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.enable.proxy.extension
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.enable.proxy.extension
|
||||
optional: true
|
||||
- name: ARGOCD_K8SCLIENT_RETRY_MAX
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.k8sclient.retry.max
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.k8sclient.retry.max
|
||||
optional: true
|
||||
- name: ARGOCD_K8SCLIENT_RETRY_BASE_BACKOFF
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
optional: true
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
optional: true
|
||||
- name: ARGOCD_API_CONTENT_TYPES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: server.api.content.types
|
||||
optional: true
|
||||
volumeMounts:
|
||||
- name: ssh-known-hosts
|
||||
mountPath: /app/config/ssh
|
||||
|
||||
@@ -0,0 +1,88 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: argocd-applicationset-controller
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: applicationset-controller
name: argocd-applicationset-controller
rules:
- apiGroups:
- argoproj.io
resources:
- applications
- applicationsets
- applicationsets/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- argoproj.io
resources:
- applicationsets/status
verbs:
- get
- patch
- update
- apiGroups:
- argoproj.io
resources:
- appprojects
verbs:
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- update
- delete
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: argocd-applicationset-controller
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: applicationset-controller
name: argocd-applicationset-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-applicationset-controller
subjects:
- kind: ServiceAccount
name: argocd-applicationset-controller
namespace: argocd
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- argocd-applicationset-controller-clusterrole.yaml
- argocd-applicationset-controller-clusterrolebinding.yaml
@@ -3,4 +3,5 @@ kind: Kustomization

resources:
- ./application-controller
- ./applicationset-controller
- ./server
@@ -7310,8 +7310,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
properties:
|
||||
@@ -9654,8 +9652,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
@@ -14681,8 +14677,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
@@ -20582,6 +20576,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -21018,7 +21020,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -21342,7 +21344,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -21394,7 +21396,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -21493,6 +21495,12 @@ spec:
|
||||
key: timeout.hard.reconciliation
|
||||
name: argocd-cm
|
||||
optional: true
|
||||
- name: ARGOCD_RECONCILIATION_JITTER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: timeout.reconciliation.jitter
|
||||
name: argocd-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -21649,7 +21657,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.10.5
|
||||
|
||||
@@ -2370,8 +2370,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
properties:
|
||||
@@ -4714,8 +4712,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
@@ -9741,8 +9737,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.10.5
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
@@ -7310,8 +7310,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
properties:
|
||||
@@ -9654,8 +9652,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
@@ -14681,8 +14677,6 @@ spec:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
required:
|
||||
- elements
|
||||
type: object
|
||||
matrix:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
@@ -20618,6 +20612,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -20847,6 +20849,95 @@ rules:
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: applicationset-controller
|
||||
app.kubernetes.io/name: argocd-applicationset-controller
|
||||
app.kubernetes.io/part-of: argocd
|
||||
name: argocd-applicationset-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- applications
|
||||
- applicationsets
|
||||
- applicationsets/finalizers
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- applicationsets/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- appprojects
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
- extensions
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: server
|
||||
@@ -21028,6 +21119,23 @@ subjects:
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: applicationset-controller
|
||||
app.kubernetes.io/name: argocd-applicationset-controller
|
||||
app.kubernetes.io/part-of: argocd
|
||||
name: argocd-applicationset-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: argocd-applicationset-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argocd-applicationset-controller
|
||||
namespace: argocd
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: server
|
||||
@@ -22275,7 +22383,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -22398,7 +22506,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -22480,7 +22588,7 @@ spec:
|
||||
key: notificationscontroller.selfservice.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -22835,7 +22943,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -22887,7 +22995,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -23200,7 +23308,13 @@ spec:
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
- name: ARGOCD_API_CONTENT_TYPES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: server.api.content.types
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -23326,6 +23440,12 @@ spec:
|
||||
key: timeout.hard.reconciliation
|
||||
name: argocd-cm
|
||||
optional: true
|
||||
- name: ARGOCD_RECONCILIATION_JITTER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: timeout.reconciliation.jitter
|
||||
name: argocd-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -23482,7 +23602,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -109,6 +109,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -1660,7 +1668,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1783,7 +1791,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1865,7 +1873,7 @@ spec:
|
||||
key: notificationscontroller.selfservice.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2220,7 +2228,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2272,7 +2280,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2585,7 +2593,13 @@ spec:
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
- name: ARGOCD_API_CONTENT_TYPES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: server.api.content.types
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2711,6 +2725,12 @@ spec:
|
||||
key: timeout.hard.reconciliation
|
||||
name: argocd-cm
|
||||
optional: true
|
||||
- name: ARGOCD_RECONCILIATION_JITTER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: timeout.reconciliation.jitter
|
||||
name: argocd-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -2867,7 +2887,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.5
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -7310,8 +7310,6 @@ spec:
- metadata
- spec
type: object
required:
- elements
type: object
matrix:
properties:
@@ -9654,8 +9652,6 @@ spec:
- metadata
- spec
type: object
required:
- elements
type: object
matrix:
x-kubernetes-preserve-unknown-fields: true
@@ -14681,8 +14677,6 @@ spec:
- metadata
- spec
type: object
required:
- elements
type: object
matrix:
x-kubernetes-preserve-unknown-fields: true
@@ -20609,6 +20603,14 @@ rules:
verbs:
- create
- list
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -20806,6 +20808,95 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: applicationset-controller
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-applicationset-controller
rules:
- apiGroups:
  - argoproj.io
  resources:
  - applications
  - applicationsets
  - applicationsets/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - argoproj.io
  resources:
  - applicationsets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - argoproj.io
  resources:
  - appprojects
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - update
  - delete
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -20955,6 +21046,23 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: applicationset-controller
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-applicationset-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-applicationset-controller
subjects:
- kind: ServiceAccount
  name: argocd-applicationset-controller
  namespace: argocd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -21370,7 +21478,7 @@ spec:
key: applicationsetcontroller.enable.scm.providers
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -21493,7 +21601,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -21575,7 +21683,7 @@ spec:
key: notificationscontroller.selfservice.enabled
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -21881,7 +21989,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -21933,7 +22041,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -22244,7 +22352,13 @@ spec:
key: server.k8sclient.retry.base.backoff
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
- name: ARGOCD_API_CONTENT_TYPES
valueFrom:
configMapKeyRef:
key: server.api.content.types
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -22370,6 +22484,12 @@ spec:
key: timeout.hard.reconciliation
name: argocd-cm
optional: true
- name: ARGOCD_RECONCILIATION_JITTER
valueFrom:
configMapKeyRef:
key: timeout.reconciliation.jitter
name: argocd-cm
optional: true
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
valueFrom:
configMapKeyRef:
@@ -22526,7 +22646,7 @@ spec:
key: controller.diff.server.side
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
name: argocd-application-controller
ports:

@@ -100,6 +100,14 @@ rules:
verbs:
- create
- list
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -755,7 +763,7 @@ spec:
key: applicationsetcontroller.enable.scm.providers
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -878,7 +886,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -960,7 +968,7 @@ spec:
key: notificationscontroller.selfservice.enabled
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1266,7 +1274,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1318,7 +1326,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -1629,7 +1637,13 @@ spec:
key: server.k8sclient.retry.base.backoff
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
- name: ARGOCD_API_CONTENT_TYPES
valueFrom:
configMapKeyRef:
key: server.api.content.types
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -1755,6 +1769,12 @@ spec:
key: timeout.hard.reconciliation
name: argocd-cm
optional: true
- name: ARGOCD_RECONCILIATION_JITTER
valueFrom:
configMapKeyRef:
key: timeout.reconciliation.jitter
name: argocd-cm
optional: true
- name: ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS
valueFrom:
configMapKeyRef:
@@ -1911,7 +1931,7 @@ spec:
key: controller.diff.server.side
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v2.10.5
imagePullPolicy: Always
name: argocd-application-controller
ports:

@@ -260,6 +260,7 @@ func (g ApplicationSetTerminalGenerators) toApplicationSetNestedGenerators() []A

// ListGenerator include items info
type ListGenerator struct {
    // +kubebuilder:validation:Optional
    Elements []apiextensionsv1.JSON `json:"elements" protobuf:"bytes,1,name=elements"`
    Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,2,name=template"`
    ElementsYaml string `json:"elementsYaml,omitempty" protobuf:"bytes,3,opt,name=elementsYaml"`

@@ -1131,6 +1131,7 @@ message KustomizeSelector {

// ListGenerator include items info
message ListGenerator {
  // +kubebuilder:validation:Optional
  repeated k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSON elements = 1;

  optional ApplicationSetTemplate template = 2;

@@ -11,7 +11,7 @@ import (

type AppControllerRateLimiterConfig struct {
    BucketSize int64
    BucketQPS int64
    BucketQPS float64
    FailureCoolDown time.Duration
    BaseDelay time.Duration
    MaxDelay time.Duration
@@ -22,7 +22,8 @@ func GetDefaultAppRateLimiterConfig() *AppControllerRateLimiterConfig {
    return &AppControllerRateLimiterConfig{
        // global queue rate limit config
        500,
        50,
        // when WORKQUEUE_BUCKET_QPS is MaxFloat64 global bucket limiting is disabled(default)
        math.MaxFloat64,
        // individual item rate limit config
        // when WORKQUEUE_FAILURE_COOLDOWN is 0 per item rate limiting is disabled(default)
        0,

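The BucketQPS field above becomes a float64 so that math.MaxFloat64 can stand for "no global limit". As a hedged illustration (not the controller's actual wiring; the function name and values are assumptions), such a config could be turned into a client-go workqueue rate limiter that combines a global token bucket with per-item exponential backoff:

package main

import (
	"fmt"
	"math"
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// newRateLimiter builds a combined limiter: a global token bucket plus
// per-item exponential backoff. With qps = math.MaxFloat64 the bucket
// effectively never throttles, matching the "disabled by default" comment.
func newRateLimiter(bucketSize int64, qps float64, baseDelay, maxDelay time.Duration) workqueue.RateLimiter {
	return workqueue.NewMaxOfRateLimiter(
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), int(bucketSize))},
		workqueue.NewItemExponentialFailureRateLimiter(baseDelay, maxDelay),
	)
}

func main() {
	rl := newRateLimiter(500, math.MaxFloat64, time.Millisecond, time.Second)
	fmt.Println("delay before retrying item:", rl.When("my-app"))
}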
@@ -107,6 +107,7 @@ type RepoServerInitConstants struct {
    StreamedManifestMaxExtractedSize int64
    StreamedManifestMaxTarSize int64
    HelmManifestMaxExtractedSize int64
    HelmRegistryMaxIndexSize int64
    DisableHelmManifestMaxExtractedSize bool
}

@@ -1389,7 +1390,7 @@ func GenerateManifests(ctx context.Context, appPath, repoRoot, revision string,
        if q.KustomizeOptions != nil {
            kustomizeBinary = q.KustomizeOptions.BinaryPath
        }
        k := kustomize.NewKustomizeApp(appPath, q.Repo.GetGitCreds(gitCredsStore), repoURL, kustomizeBinary)
        k := kustomize.NewKustomizeApp(repoRoot, appPath, q.Repo.GetGitCreds(gitCredsStore), repoURL, kustomizeBinary)
        targetObjs, _, err = k.Build(q.ApplicationSource.Kustomize, q.KustomizeOptions, env)
    case v1alpha1.ApplicationSourceTypePlugin:
        pluginName := ""
@@ -1976,7 +1977,7 @@ func (s *Service) GetAppDetails(ctx context.Context, q *apiclient.RepoServerAppD
            return err
        }
    case v1alpha1.ApplicationSourceTypeKustomize:
        if err := populateKustomizeAppDetails(res, q, opContext.appPath, commitSHA, s.gitCredsStore); err != nil {
        if err := populateKustomizeAppDetails(res, q, repoRoot, opContext.appPath, commitSHA, s.gitCredsStore); err != nil {
            return err
        }
    case v1alpha1.ApplicationSourceTypePlugin:
@@ -2117,13 +2118,13 @@ func walkHelmValueFilesInPath(root string, valueFiles *[]string) filepath.WalkFu
    }
}

func populateKustomizeAppDetails(res *apiclient.RepoAppDetailsResponse, q *apiclient.RepoServerAppDetailsQuery, appPath string, reversion string, credsStore git.CredsStore) error {
func populateKustomizeAppDetails(res *apiclient.RepoAppDetailsResponse, q *apiclient.RepoServerAppDetailsQuery, repoRoot string, appPath string, reversion string, credsStore git.CredsStore) error {
    res.Kustomize = &apiclient.KustomizeAppSpec{}
    kustomizeBinary := ""
    if q.KustomizeOptions != nil {
        kustomizeBinary = q.KustomizeOptions.BinaryPath
    }
    k := kustomize.NewKustomizeApp(appPath, q.Repo.GetGitCreds(credsStore), q.Repo.Repo, kustomizeBinary)
    k := kustomize.NewKustomizeApp(repoRoot, appPath, q.Repo.GetGitCreds(credsStore), q.Repo.Repo, kustomizeBinary)
    fakeManifestRequest := apiclient.ManifestRequest{
        AppName: q.AppName,
        Namespace: "", // FIXME: omit it for now
@@ -2356,7 +2357,7 @@ func (s *Service) newHelmClientResolveRevision(repo *v1alpha1.Repository, revisi
        return helmClient, version.String(), nil
    }

    index, err := helmClient.GetIndex(noRevisionCache)
    index, err := helmClient.GetIndex(noRevisionCache, s.initConstants.HelmRegistryMaxIndexSize)
    if err != nil {
        return nil, "", err
    }
@@ -2434,7 +2435,7 @@ func checkoutRevision(gitClient git.Client, revision string, submoduleEnabled bo
}

func (s *Service) GetHelmCharts(ctx context.Context, q *apiclient.HelmChartsRequest) (*apiclient.HelmChartsResponse, error) {
    index, err := s.newHelmClient(q.Repo.Repo, q.Repo.GetHelmCreds(), q.Repo.EnableOCI, q.Repo.Proxy, helm.WithChartPaths(s.chartPaths)).GetIndex(true)
    index, err := s.newHelmClient(q.Repo.Repo, q.Repo.GetHelmCreds(), q.Repo.EnableOCI, q.Repo.Proxy, helm.WithChartPaths(s.chartPaths)).GetIndex(true, s.initConstants.HelmRegistryMaxIndexSize)
    if err != nil {
        return nil, err
    }
@@ -2469,7 +2470,7 @@ func (s *Service) TestRepository(ctx context.Context, q *apiclient.TestRepositor
            _, err := helm.NewClient(repo.Repo, repo.GetHelmCreds(), repo.EnableOCI, repo.Proxy).TestHelmOCI()
            return err
        } else {
            _, err := helm.NewClient(repo.Repo, repo.GetHelmCreds(), repo.EnableOCI, repo.Proxy).GetIndex(false)
            _, err := helm.NewClient(repo.Repo, repo.GetHelmCreds(), repo.EnableOCI, repo.Proxy).GetIndex(false, s.initConstants.HelmRegistryMaxIndexSize)
            return err
        }
    },

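The new GetIndex parameter above threads HelmRegistryMaxIndexSize from the repo-server init constants into every Helm index lookup. A minimal sketch, using only the Go standard library, of how such a byte cap can be enforced while downloading an index file (the URL, function name and limit here are illustrative assumptions, not taken from the diff):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchWithLimit reads at most maxSize bytes from the response body and fails
// if the payload is larger, so an oversized registry index cannot exhaust memory.
func fetchWithLimit(url string, maxSize int64) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	// Read one byte past the limit: if that extra byte arrives, the body is too big.
	data, err := io.ReadAll(io.LimitReader(resp.Body, maxSize+1))
	if err != nil {
		return nil, err
	}
	if int64(len(data)) > maxSize {
		return nil, fmt.Errorf("index exceeds the configured maximum size of %d bytes", maxSize)
	}
	return data, nil
}

func main() {
	data, err := fetchWithLimit("https://charts.example.com/index.yaml", 1<<20) // 1 MiB cap, illustrative
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("downloaded %d bytes\n", len(data))
}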
@@ -113,7 +113,7 @@ func newServiceWithMocks(t *testing.T, root string, signed bool) (*Service, *git
    chart := "my-chart"
    oobChart := "out-of-bounds-chart"
    version := "1.1.0"
    helmClient.On("GetIndex", mock.AnythingOfType("bool")).Return(&helm.Index{Entries: map[string]helm.Entries{
    helmClient.On("GetIndex", mock.AnythingOfType("bool"), mock.Anything).Return(&helm.Index{Entries: map[string]helm.Entries{
        chart: {{Version: "1.0.0"}, {Version: version}},
        oobChart: {{Version: "1.0.0"}, {Version: version}},
    }}, nil)

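In the test change above, the mocked GetIndex gains a second matcher because the method now takes a size-limit argument. A small self-contained testify example showing the same pattern (the Index and mockHelmClient types here are hypothetical stand-ins, not the real repo-server mocks):

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type Index struct{ Entries map[string][]string }

type mockHelmClient struct{ mock.Mock }

// GetIndex mirrors a two-argument signature: a cache flag plus a max index size.
// Every .On(...) expectation must now supply a matcher for both arguments.
func (m *mockHelmClient) GetIndex(noCache bool, maxSize int64) (*Index, error) {
	args := m.Called(noCache, maxSize)
	return args.Get(0).(*Index), args.Error(1)
}

func main() {
	c := new(mockHelmClient)
	c.On("GetIndex", mock.AnythingOfType("bool"), mock.Anything).
		Return(&Index{Entries: map[string][]string{"my-chart": {"1.0.0", "1.1.0"}}}, nil)

	idx, err := c.GetIndex(true, 1<<20)
	fmt.Println(idx.Entries, err)
}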
@@ -0,0 +1,36 @@
hs = { status = "Progressing", message = "AdvancedCronJobs has active jobs" }
-- Extract lastScheduleTime and convert to time objects
lastScheduleTime = nil

if obj.status.lastScheduleTime ~= nil then
  local year, month, day, hour, min, sec = string.match(obj.status.lastScheduleTime, "(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z")
  lastScheduleTime = os.time({year=year, month=month, day=day, hour=hour, min=min, sec=sec})
end


if lastScheduleTime == nil and obj.spec.paused == true then
  hs.status = "Suspended"
  hs.message = "AdvancedCronJob is Paused"
  return hs
end

-- AdvancedCronJobs are progressing if they have any object in the "active" state
if obj.status.active ~= nil and #obj.status.active > 0 then
  hs.status = "Progressing"
  hs.message = "AdvancedCronJobs has active jobs"
  return hs
end
-- AdvancedCronJobs are Degraded if they don't have lastScheduleTime
if lastScheduleTime == nil then
  hs.status = "Degraded"
  hs.message = "AdvancedCronJobs has not run successfully"
  return hs
end
-- AdvancedCronJobs are healthy if they have lastScheduleTime
if lastScheduleTime ~= nil then
  hs.status = "Healthy"
  hs.message = "AdvancedCronJobs has run successfully"
  return hs
end

return hs
@@ -0,0 +1,17 @@
tests:
- healthStatus:
    status: Healthy
    message: AdvancedCronJobs has run successfully
  inputPath: testdata/lastScheduleTime.yaml
- healthStatus:
    status: Degraded
    message: AdvancedCronJobs has not run successfully
  inputPath: testdata/notScheduled.yaml
- healthStatus:
    status: Progressing
    message: AdvancedCronJobs has active jobs
  inputPath: testdata/activeJobs.yaml
- healthStatus:
    status: Suspended
    message: AdvancedCronJob is Paused
  inputPath: testdata/suspended.yaml
30
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml
vendored
Normal file
@@ -0,0 +1,30 @@
apiVersion: apps.kruise.io/v1alpha1
kind: AdvancedCronJob
metadata:
  name: acj-test
spec:
  schedule: "*/1 * * * *"
  template:
    broadcastJobTemplate:
      spec:
        template:
          spec:
            containers:
              - name: pi
                image: perl
                command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
            restartPolicy: Never
        completionPolicy:
          type: Always
          ttlSecondsAfterFinished: 30

status:
  active:
    - apiVersion: apps.kruise.io/v1alpha1
      kind: BroadcastJob
      name: acj-test-1694882400
      namespace: default
      resourceVersion: '4012'
      uid: 2b08a429-a43b-4382-8e5d-3db0c72b5b13
  lastScheduleTime: '2023-09-16T16:40:00Z'
  type: BroadcastJob
23
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
apiVersion: apps.kruise.io/v1alpha1
kind: AdvancedCronJob
metadata:
  name: acj-test
spec:
  schedule: "*/1 * * * *"
  template:
    broadcastJobTemplate:
      spec:
        template:
          spec:
            containers:
              - name: pi
                image: perl
                command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
            restartPolicy: Never
        completionPolicy:
          type: Always
          ttlSecondsAfterFinished: 30

status:
  lastScheduleTime: "2023-09-16T16:29:00Z"
  type: BroadcastJob
22
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml
vendored
Normal file
@@ -0,0 +1,22 @@
apiVersion: apps.kruise.io/v1alpha1
kind: AdvancedCronJob
metadata:
  name: acj-test
spec:
  schedule: "*/1 * * * *"
  template:
    broadcastJobTemplate:
      spec:
        template:
          spec:
            containers:
              - name: pi
                image: perl
                command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
            restartPolicy: Never
        completionPolicy:
          type: Always
          ttlSecondsAfterFinished: 30

status:
  lastScheduleTime: null
23
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
apiVersion: apps.kruise.io/v1alpha1
kind: AdvancedCronJob
metadata:
  name: acj-test
spec:
  schedule: "*/1 * * * *"
  template:
    broadcastJobTemplate:
      spec:
        template:
          spec:
            containers:
              - name: pi
                image: perl
                command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
            restartPolicy: Never
        completionPolicy:
          type: Always
          ttlSecondsAfterFinished: 30
  paused: true

status:
  type: BroadcastJob
@@ -0,0 +1,32 @@
hs={ status= "Progressing", message= "BroadcastJob is still running" }

if obj.status ~= nil then

  -- BroadcastJob are healthy if desired number and succeeded number is equal
  if obj.status.desired == obj.status.succeeded and obj.status.phase == "completed" then
    hs.status = "Healthy"
    hs.message = "BroadcastJob is completed successfully"
    return hs
  end
  -- BroadcastJob are progressing if active is not equal to 0
  if obj.status.active ~= 0 and obj.status.phase == "running" then
    hs.status = "Progressing"
    hs.message = "BroadcastJob is still running"
    return hs
  end
  -- BroadcastJob are degraded if failed is not equal to 0
  if obj.status.failed ~= 0 and obj.status.phase == "failed" then
    hs.status = "Degraded"
    hs.message = "BroadcastJob failed"
    return hs
  end

  if obj.status.phase == "paused" and obj.spec.paused == true then
    hs.status = "Suspended"
    hs.message = "BroadcastJob is Paused"
    return hs
  end

end

return hs
@@ -0,0 +1,17 @@
tests:
- healthStatus:
    status: Healthy
    message: "BroadcastJob is completed successfully"
  inputPath: testdata/succeeded.yaml
- healthStatus:
    status: Degraded
    message: "BroadcastJob failed"
  inputPath: testdata/failed.yaml
- healthStatus:
    status: Progressing
    message: "BroadcastJob is still running"
  inputPath: testdata/running.yaml
- healthStatus:
    status: Suspended
    message: "BroadcastJob is Paused"
  inputPath: testdata/suspended.yaml
31
resource_customizations/apps.kruise.io/BroadcastJob/testdata/failed.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: failed-job
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["exit", "1"] # a dummy command to fail
      restartPolicy: Never
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds

status:
  active: 0
  completionTime: '2023-09-17T14:31:38Z'
  conditions:
    - lastProbeTime: '2023-09-17T14:31:38Z'
      lastTransitionTime: '2023-09-17T14:31:38Z'
      message: failure policy is FailurePolicyTypeFailFast and failed pod is found
      reason: Failed
      status: 'True'
      type: Failed
  desired: 1
  failed: 1
  phase: failed
  startTime: '2023-09-17T14:31:32Z'
  succeeded: 0
22
resource_customizations/apps.kruise.io/BroadcastJob/testdata/running.yaml
vendored
Normal file
@@ -0,0 +1,22 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: download-image
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["echo", "started"] # a dummy command to do nothing
      restartPolicy: Never
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds
status:
  active: 1
  desired: 1
  failed: 0
  phase: running
  startTime: '2023-09-17T14:43:30Z'
  succeeded: 0
31
resource_customizations/apps.kruise.io/BroadcastJob/testdata/succeeded.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: download-image
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["echo", "started"] # a dummy command to do nothing
      restartPolicy: Never
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds

status:
  active: 0
  completionTime: '2023-09-17T14:35:14Z'
  conditions:
    - lastProbeTime: '2023-09-17T14:35:14Z'
      lastTransitionTime: '2023-09-17T14:35:14Z'
      message: Job completed, 1 pods succeeded, 0 pods failed
      reason: Complete
      status: 'True'
      type: Complete
  desired: 1
  failed: 0
  phase: completed
  startTime: '2023-09-17T14:35:07Z'
  succeeded: 1
Some files were not shown because too many files have changed in this diff.