Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-22 10:38:51 +01:00)

Compare commits: hydrator-c ... v2.10.0-rc (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4d0f940e04 | |
| | 8b865d7e30 | |
| | a6d8c924ee | |
| | 3c5878ecf4 | |
| | 12f2252700 | |
| | 6f6a9a940b | |
| | 3ca67858f0 | |
| | 3f18c21c07 | |
| | 3ebcca66f3 | |
| | d9196060c2 | |
Makefile (2 changed lines)

@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=

-ARGOCD_E2E_TEST_TIMEOUT?=60m
+ARGOCD_E2E_TEST_TIMEOUT?=90m

ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
Procfile (2 changed lines)

@@ -1,4 +1,4 @@
-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
+controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
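The only Procfile change is the HOSTNAME=testappcontroller-1 prefix for the local controller process: the controller infers its shard from the trailing "-<n>" of its hostname (see InferShard later in this diff). A minimal stdlib sketch of that convention:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// inferShard mirrors the hostname convention used by InferShard in
// controller/sharding: the shard index is the numeric suffix after the last '-'.
func inferShard(hostname string) (int, error) {
	parts := strings.Split(hostname, "-")
	shard, err := strconv.Atoi(parts[len(parts)-1])
	if err != nil {
		return 0, fmt.Errorf("hostname should end with shard number separated by '-' but got: %s", hostname)
	}
	return shard, nil
}

func main() {
	shard, _ := inferShard("testappcontroller-1") // the hostname set in the Procfile
	fmt.Println(shard)                            // 1
}
```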
@@ -146,7 +146,7 @@ func NewCommand() *cobra.Command {
appController.InvalidateProjectsCache()
}))
kubectl := kubeutil.NewKubectl()
-clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+clusterSharding := getClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,

@@ -164,7 +164,7 @@ func NewCommand() *cobra.Command {
metricsAplicationLabels,
kubectlParallelismLimit,
persistResourceHealth,
-clusterFilter,
+clusterSharding,
applicationNamespaces,
&workqueueRateLimit,
serverSideDiff,

@@ -233,11 +233,10 @@ func NewCommand() *cobra.Command {
return &command
}

-func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
-var replicas int
-shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
+func getClusterSharding(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterShardingCache {
+var replicasCount int
+// StatefulSet mode and Deployment mode uses different default values for shard number.
+defaultShardNumberValue := 0
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})

@@ -247,22 +246,21 @@ func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.Se
}

if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
-replicas = int(*appControllerDeployment.Spec.Replicas)
+replicasCount = int(*appControllerDeployment.Spec.Replicas)
+defaultShardNumberValue = -1
} else {
-replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
+replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
}

-var clusterFilter func(cluster *v1alpha1.Cluster) bool
-if replicas > 1 {
+shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, defaultShardNumberValue, -math.MaxInt32, math.MaxInt32)
+if replicasCount > 1 {
// check for shard mapping using configmap if application-controller is a deployment
// else use existing logic to infer shard from pod name if application-controller is a statefulset
if enableDynamicClusterDistribution && appControllerDeployment != nil {
var err error
// retry 3 times if we find a conflict while updating shard mapping configMap.
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
-shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
+shardNumber, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
if !kubeerrors.IsConflict(err) {
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
break

@@ -271,19 +269,19 @@ func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.Se
}
errors.CheckError(err)
} else {
-if shard < 0 {
+if shardNumber < 0 {
var err error
-shard, err = sharding.InferShard()
+shardNumber, err = sharding.InferShard()
errors.CheckError(err)
}
+if shardNumber > replicasCount {
+log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
+shardNumber = 0
+}
}
-log.Infof("Processing clusters from shard %d", shard)
-db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
-log.Infof("Using filter function: %s", shardingAlgorithm)
-distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
-clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
-} else {
-log.Info("Processing all cluster shards")
}
-return clusterFilter
+db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
+return sharding.NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm)
}
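The refactored getClusterSharding above resolves the replica count and the shard number before building the cache: a single replica always maps to shard 0, dynamic cluster distribution on a Deployment reads the shard-mapping ConfigMap, and a StatefulSet falls back to hostname inference. A minimal sketch of that decision order, with resolveFromConfigMap and inferFromHostname as hypothetical stand-ins for sharding.GetOrUpdateShardFromConfigMap and sharding.InferShard:

```go
package main

import "fmt"

// resolveShard mirrors the decision order of getClusterSharding above.
// resolveFromConfigMap and inferFromHostname are hypothetical stand-ins.
func resolveShard(replicas int, dynamicDistribution, deploymentFound bool,
	resolveFromConfigMap, inferFromHostname func() (int, error)) (int, error) {
	if replicas <= 1 {
		return 0, nil // a single replica processes every cluster
	}
	if dynamicDistribution && deploymentFound {
		// Deployment mode: shard comes from the shard-mapping ConfigMap (retried upstream).
		return resolveFromConfigMap()
	}
	// StatefulSet mode: shard is inferred from the pod hostname suffix.
	return inferFromHostname()
}

func main() {
	shard, _ := resolveShard(3, false, false,
		func() (int, error) { return -1, nil },
		func() (int, error) { return 2, nil }) // e.g. pod "argocd-application-controller-2"
	fmt.Println(shard) // 2
}
```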
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math"
+"strings"
"time"

"github.com/argoproj/pkg/stats"

@@ -61,6 +62,7 @@ func NewCommand() *cobra.Command {
repoServerAddress string
dexServerAddress string
disableAuth bool
+contentTypes string
enableGZip bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
cacheSrc func() (*servercache.Cache, error)

@@ -180,6 +182,7 @@ func NewCommand() *cobra.Command {
DexServerAddr: dexServerAddress,
DexTLSConfig: dexTlsConfig,
DisableAuth: disableAuth,
+ContentTypes: strings.Split(contentTypes, ";"),
EnableGZip: enableGZip,
TLSConfigCustomizer: tlsConfigCustomizer,
Cache: cache,

@@ -234,6 +237,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
+command.Flags().StringVar(&contentTypes, "api-content-types", "application/json", "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
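The new --api-content-types flag is split on ";" and handed to the server as ContentTypes, with an empty value meaning "allow everything". The enforcement itself is not part of this diff; the sketch below only illustrates how such a semicolon-separated allow-list could be checked:

```go
package main

import (
	"fmt"
	"strings"
)

// allowed illustrates a possible check for a semicolon-separated
// --api-content-types value; the real server-side check is not shown in this diff.
func allowed(contentTypes, requestType string) bool {
	if contentTypes == "" {
		return true // empty flag: any content type is allowed
	}
	for _, t := range strings.Split(contentTypes, ";") {
		if t == requestType {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("application/json", "application/json")) // true
	fmt.Println(allowed("application/json", "text/plain"))       // false
}
```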
@@ -25,6 +25,7 @@ import (
"github.com/argoproj/argo-cd/v2/common"
+"github.com/argoproj/argo-cd/v2/controller/sharding"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/util/argo"

@@ -78,7 +79,7 @@ type ClusterWithInfo struct {
Namespaces []string
}

-func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
+func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, shardingAlgorithm string, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)

argoDB := db.NewDB(namespace, settingsMgr, kubeClient)

@@ -86,6 +87,10 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
if err != nil {
return nil, err
}
+clusterShardingCache := sharding.NewClusterSharding(argoDB, shard, replicas, shardingAlgorithm)
+clusterShardingCache.Init(clustersList)
+clusterShards := clusterShardingCache.GetDistribution()

var cache *appstatecache.Cache
if portForwardRedis {
overrides := clientcmd.ConfigOverrides{}

@@ -122,8 +127,15 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
apps[i] = app
}
clusters := make([]ClusterWithInfo, len(clustersList.Items))

batchSize := 10
batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
+clusterSharding := &sharding.ClusterSharding{
+Shard: shard,
+Replicas: replicas,
+Shards: make(map[string]int),
+Clusters: make(map[string]*v1alpha1.Cluster),
+}
for batchNum := 0; batchNum < batchesCount; batchNum++ {
batchStart := batchSize * batchNum
batchEnd := batchSize * (batchNum + 1)

@@ -135,12 +147,12 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
clusterShard := 0
cluster := batch[i]
if replicas > 0 {
-distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
-distributionFunction(&cluster)
+distributionFunction := sharding.GetDistributionFunction(clusterSharding.GetClusterAccessor(), common.DefaultShardingAlgorithm, replicas)
+clusterShard := clusterShards[cluster.Server]
cluster.Shard = pointer.Int64(int64(clusterShard))
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
}

if shard != -1 && clusterShard != shard {
return nil
}

@@ -176,6 +188,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
var (
shard int
replicas int
+shardingAlgorithm string
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool

@@ -183,7 +196,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
)
var command = cobra.Command{
Use: "shards",
-Short: "Print information about each controller shard and portion of Kubernetes resources it is responsible for.",
+Short: "Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.",
Run: func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()

@@ -203,8 +216,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
if replicas == 0 {
return
}
-clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
+clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
errors.CheckError(err)
if len(clusters) == 0 {
return

@@ -216,7 +228,9 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
+command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] ")
command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")

cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)

// parse all added flags so far to get the redis-compression flag that was added by AddCacheFlagsToCmd() above

@@ -461,6 +475,7 @@ func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
var (
shard int
replicas int
+shardingAlgorithm string
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool

@@ -494,7 +509,7 @@ argocd admin cluster stats target-cluster`,
replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
errors.CheckError(err)
}
-clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
+clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
errors.CheckError(err)

w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)

@@ -508,6 +523,7 @@ argocd admin cluster stats target-cluster`,
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().IntVar(&shard, "shard", -1, "Cluster shard filter")
command.Flags().IntVar(&replicas, "replicas", 0, "Application controller replicas count. Inferred from number of running controller pods if not specified")
+command.Flags().StringVar(&shardingAlgorithm, "sharding-method", common.DefaultShardingAlgorithm, "Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] ")
command.Flags().BoolVar(&portForwardRedis, "port-forward-redis", true, "Automatically port-forward ha proxy redis from current namespace?")
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command)
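loadClusters now builds the same ClusterShardingCache the controller uses and reads each cluster's shard from GetDistribution(). A hedged usage sketch of that lookup, assuming the argo-cd module at this revision and the mocked ArgoDB the tests use:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
)

func main() {
	// Compute the cluster -> shard table the way `argocd admin cluster shards` now does.
	cache := sharding.NewClusterSharding(&dbmocks.ArgoDB{}, 0, 2, common.DefaultShardingAlgorithm)
	cache.Init(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
		{ID: "1", Server: "https://cluster-a"},
		{ID: "2", Server: "https://cluster-b"},
	}})
	for server, shard := range cache.GetDistribution() {
		fmt.Printf("%s -> shard %d\n", server, shard)
	}
}
```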
@@ -115,9 +115,9 @@ const (
LegacyShardingAlgorithm = "legacy"
// RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution accross all shards
RoundRobinShardingAlgorithm = "round-robin"
-DefaultShardingAlgorithm = LegacyShardingAlgorithm
// AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
AppControllerHeartbeatUpdateRetryCount = 3
+DefaultShardingAlgorithm = LegacyShardingAlgorithm
)

// Dex related constants
@@ -126,7 +126,7 @@ type ApplicationController struct {
refreshRequestedAppsMutex *sync.Mutex
metricsServer *metrics.MetricsServer
kubectlSemaphore *semaphore.Weighted
-clusterFilter func(cluster *appv1.Cluster) bool
+clusterSharding sharding.ClusterShardingCache
projByNameCache sync.Map
applicationNamespaces []string
}

@@ -149,7 +149,7 @@ func NewApplicationController(
metricsApplicationLabels []string,
kubectlParallelismLimit int64,
persistResourceHealth bool,
-clusterFilter func(cluster *appv1.Cluster) bool,
+clusterSharding sharding.ClusterShardingCache,
applicationNamespaces []string,
rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
serverSideDiff bool,

@@ -179,7 +179,7 @@ func NewApplicationController(
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
-clusterFilter: clusterFilter,
+clusterSharding: clusterSharding,
projByNameCache: sync.Map{},
applicationNamespaces: applicationNamespaces,
}

@@ -260,7 +260,7 @@ func NewApplicationController(
return nil, err
}
}
-stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking())
+stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff)
ctrl.appInformer = appInformer
ctrl.appLister = appLister

@@ -772,6 +772,13 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
go ctrl.projInformer.Run(ctx.Done())
go ctrl.deploymentInformer.Informer().Run(ctx.Done())

+clusters, err := ctrl.db.ListClusters(ctx)
+if err != nil {
+log.Warnf("Cannot init sharding. Error while querying clusters list from database: %v", err)
+} else {
+ctrl.clusterSharding.Init(clusters)
+}
+
errors.CheckError(ctrl.stateCache.Init())

if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced, ctrl.projInformer.HasSynced) {

@@ -1976,15 +1983,11 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
}
}

-if ctrl.clusterFilter != nil {
-cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
-if err != nil {
-return ctrl.clusterFilter(nil)
-}
-return ctrl.clusterFilter(cluster)
-}
-return true
+cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
+if err != nil {
+return ctrl.clusterSharding.IsManagedCluster(nil)
+}
+return ctrl.clusterSharding.IsManagedCluster(cluster)
}

func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {

@@ -2136,7 +2139,7 @@ func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1
}

func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
-updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace)
+updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterSharding.IsManagedCluster, ctrl.getAppProj, ctrl.namespace)
go updater.Run(ctx)
}
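canProcessApp now always resolves the destination cluster and delegates the decision to IsManagedCluster, with a failed lookup falling back to IsManagedCluster(nil). A self-contained sketch of that gating, using a narrowed stand-in interface rather than the real ApplicationController:

```go
package main

import "fmt"

type cluster struct{ id int }

// shardChecker is a narrowed stand-in for sharding.ClusterShardingCache.
type shardChecker interface{ IsManagedCluster(c *cluster) bool }

// evenShard is a toy checker that "owns" clusters with an even id.
type evenShard struct{}

func (evenShard) IsManagedCluster(c *cluster) bool {
	if c == nil {
		return true // unknown destination stays on the current shard, as in canProcessApp above
	}
	return c.id%2 == 0
}

// canProcessApp mirrors the new gating: resolve the destination cluster, then
// ask the sharding cache; a lookup error falls back to IsManagedCluster(nil).
func canProcessApp(s shardChecker, getCluster func() (*cluster, error)) bool {
	c, err := getCluster()
	if err != nil {
		return s.IsManagedCluster(nil)
	}
	return s.IsManagedCluster(c)
}

func main() {
	fmt.Println(canProcessApp(evenShard{}, func() (*cluster, error) { return &cluster{id: 4}, nil }))          // true
	fmt.Println(canProcessApp(evenShard{}, func() (*cluster, error) { return nil, fmt.Errorf("not found") })) // true (fallback)
}
```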
@@ -17,7 +17,9 @@ import (
"github.com/argoproj/argo-cd/v2/common"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
+"github.com/argoproj/argo-cd/v2/controller/sharding"

+dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
"github.com/argoproj/gitops-engine/pkg/cache/mocks"
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"

@@ -154,6 +156,10 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
nil,
false,
)
+db := &dbmocks.ArgoDB{}
+db.On("GetApplicationControllerReplicas").Return(1)
+// Setting a default sharding algorithm for the tests where we cannot set it.
+ctrl.clusterSharding = sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm)
if err != nil {
panic(err)
}

@@ -686,7 +692,6 @@ func TestFinalizeAppDeletion(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}}, nil)
-
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]

@@ -1809,13 +1814,11 @@ func Test_canProcessApp(t *testing.T) {
})
t.Run("with cluster filter, good namespace", func(t *testing.T) {
app.Namespace = "good"
-ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
canProcess := ctrl.canProcessApp(app)
assert.True(t, canProcess)
})
t.Run("with cluster filter, bad namespace", func(t *testing.T) {
app.Namespace = "bad"
-ctrl.clusterFilter = func(_ *v1alpha1.Cluster) bool { return true }
canProcess := ctrl.canProcessApp(app)
assert.False(t, canProcess)
})
controller/cache/cache.go (21 changed lines)

@@ -29,6 +29,7 @@ import (
"k8s.io/client-go/tools/cache"

"github.com/argoproj/argo-cd/v2/controller/metrics"
+"github.com/argoproj/argo-cd/v2/controller/sharding"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo"

@@ -168,7 +169,7 @@ func NewLiveStateCache(
kubectl kube.Kubectl,
metricsServer *metrics.MetricsServer,
onObjectUpdated ObjectUpdatedHandler,
-clusterFilter func(cluster *appv1.Cluster) bool,
+clusterSharding sharding.ClusterShardingCache,
resourceTracking argo.ResourceTracking) LiveStateCache {

return &liveStateCache{

@@ -179,7 +180,7 @@ func NewLiveStateCache(
kubectl: kubectl,
settingsMgr: settingsMgr,
metricsServer: metricsServer,
-clusterFilter: clusterFilter,
+clusterSharding: clusterSharding,
resourceTracking: resourceTracking,
}
}

@@ -202,7 +203,7 @@ type liveStateCache struct {
kubectl kube.Kubectl
settingsMgr *settings.SettingsManager
metricsServer *metrics.MetricsServer
-clusterFilter func(cluster *appv1.Cluster) bool
+clusterSharding sharding.ClusterShardingCache
resourceTracking argo.ResourceTracking

clusters map[string]clustercache.ClusterCache

@@ -722,22 +723,24 @@ func (c *liveStateCache) Run(ctx context.Context) error {
}

func (c *liveStateCache) canHandleCluster(cluster *appv1.Cluster) bool {
-if c.clusterFilter == nil {
-return true
-}
-return c.clusterFilter(cluster)
+return c.clusterSharding.IsManagedCluster(cluster)
}

func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
+c.clusterSharding.Add(cluster)
if !c.canHandleCluster(cluster) {
log.Infof("Ignoring cluster %s", cluster.Server)
return
}

c.lock.Lock()
_, ok := c.clusters[cluster.Server]
c.lock.Unlock()
if !ok {
log.Debugf("Checking if cache %v / cluster %v has appInformer %v", c, cluster, c.appInformer)
if c.appInformer == nil {
log.Warn("Cannot get a cluster appInformer. Cache may not be started this time")
return
}
if c.isClusterHasApps(c.appInformer.GetStore().List(), cluster) {
go func() {
// warm up cache for cluster with apps

@@ -748,6 +751,7 @@ func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
}

func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *appv1.Cluster) {
+c.clusterSharding.Update(newCluster)
c.lock.Lock()
cluster, ok := c.clusters[newCluster.Server]
c.lock.Unlock()

@@ -790,6 +794,7 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a

func (c *liveStateCache) handleDeleteEvent(clusterServer string) {
c.lock.RLock()
+c.clusterSharding.Delete(clusterServer)
cluster, ok := c.clusters[clusterServer]
c.lock.RUnlock()
if ok {
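The live-state cache now keeps the sharding cache in sync with cluster secret events: Add on create (before deciding whether to watch), Update on modify, Delete on delete, while canHandleCluster simply asks IsManagedCluster. A compact sketch of that ordering with narrowed types:

```go
package main

import "fmt"

type cluster struct{ server string }

// shardCache is the narrowed surface of ClusterShardingCache used here.
type shardCache interface {
	Add(c *cluster)
	IsManagedCluster(c *cluster) bool
}

// singleShard manages everything and records what it has seen.
type singleShard struct{ seen []string }

func (s *singleShard) Add(c *cluster)                   { s.seen = append(s.seen, c.server) }
func (s *singleShard) IsManagedCluster(c *cluster) bool { return true }

// handleAddEvent mirrors the ordering in the diff: register the cluster with the
// sharding cache first, then decide whether this shard should watch it.
func handleAddEvent(sc shardCache, c *cluster) {
	sc.Add(c)
	if !sc.IsManagedCluster(c) {
		fmt.Printf("Ignoring cluster %s\n", c.server)
		return
	}
	fmt.Printf("Watching cluster %s\n", c.server)
}

func main() {
	sc := &singleShard{}
	handleAddEvent(sc, &cluster{server: "https://mycluster"})
	fmt.Println(sc.seen)
}
```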
controller/cache/cache_test.go (50 changed lines)

@@ -21,7 +21,11 @@ import (
"github.com/stretchr/testify/mock"
"k8s.io/client-go/kubernetes/fake"

+"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/controller/metrics"
+"github.com/argoproj/argo-cd/v2/controller/sharding"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
)

@@ -35,11 +39,13 @@ func TestHandleModEvent_HasChanges(t *testing.T) {
clusterCache := &mocks.ClusterCache{}
clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
clusterCache.On("EnsureSynced").Return(nil).Once()

+db := &dbmocks.ArgoDB{}
+db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
"https://mycluster": clusterCache,
},
+clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
}

clustersCache.handleModEvent(&appv1.Cluster{

@@ -56,14 +62,22 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
clusterCache := &mocks.ClusterCache{}
clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
clusterCache.On("EnsureSynced").Return(nil).Once()

+db := &dbmocks.ArgoDB{}
+db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
-clusters: map[string]cache.ClusterCache{
-"https://mycluster": clusterCache,
-},
-clusterFilter: func(cluster *appv1.Cluster) bool {
-return false
-},
+db: nil,
+appInformer: nil,
+onObjectUpdated: func(managedByApp map[string]bool, ref v1.ObjectReference) {
+},
+kubectl: nil,
+settingsMgr: &argosettings.SettingsManager{},
+metricsServer: &metrics.MetricsServer{},
+// returns a shard that never process any cluster
+clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
+resourceTracking: nil,
+clusters: map[string]cache.ClusterCache{"https://mycluster": clusterCache},
+cacheSettings: cacheSettings{},
+lock: sync.RWMutex{},
}

clustersCache.handleModEvent(&appv1.Cluster{

@@ -75,18 +89,20 @@ func TestHandleModEvent_ClusterExcluded(t *testing.T) {
Namespaces: []string{"default"},
})

-assert.Len(t, clustersCache.clusters, 0)
+assert.Len(t, clustersCache.clusters, 1)
}

func TestHandleModEvent_NoChanges(t *testing.T) {
clusterCache := &mocks.ClusterCache{}
clusterCache.On("Invalidate", mock.Anything).Panic("should not invalidate")
clusterCache.On("EnsureSynced").Return(nil).Panic("should not re-sync")

+db := &dbmocks.ArgoDB{}
+db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
"https://mycluster": clusterCache,
},
+clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
}

clustersCache.handleModEvent(&appv1.Cluster{

@@ -99,11 +115,11 @@ func TestHandleModEvent_NoChanges(t *testing.T) {
}

func TestHandleAddEvent_ClusterExcluded(t *testing.T) {
+db := &dbmocks.ArgoDB{}
+db.On("GetApplicationControllerReplicas").Return(1)
clustersCache := liveStateCache{
-clusters: map[string]cache.ClusterCache{},
-clusterFilter: func(cluster *appv1.Cluster) bool {
-return false
-},
+clusters: map[string]cache.ClusterCache{},
+clusterSharding: sharding.NewClusterSharding(db, 0, 2, common.DefaultShardingAlgorithm),
}
clustersCache.handleAddEvent(&appv1.Cluster{
Server: "https://mycluster",

@@ -118,6 +134,8 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
Server: "https://mycluster",
Config: appv1.ClusterConfig{Username: "bar"},
}
+db := &dbmocks.ArgoDB{}
+db.On("GetApplicationControllerReplicas").Return(1)
fakeClient := fake.NewSimpleClientset()
settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
liveStateCacheLock := sync.RWMutex{}

@@ -126,10 +144,8 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
clusters: map[string]cache.ClusterCache{
testCluster.Server: gitopsEngineClusterCache,
},
-clusterFilter: func(cluster *appv1.Cluster) bool {
-return true
-},
-settingsMgr: settingsMgr,
+clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
+settingsMgr: settingsMgr,
// Set the lock here so we can reference it later
// nolint We need to overwrite here to have access to the lock
lock: liveStateCacheLock,
controller/sharding/cache.go (163 lines, new file)

@@ -0,0 +1,163 @@
package sharding

import (
"sync"

"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/db"
log "github.com/sirupsen/logrus"
)

type ClusterShardingCache interface {
Init(clusters *v1alpha1.ClusterList)
Add(c *v1alpha1.Cluster)
Delete(clusterServer string)
Update(c *v1alpha1.Cluster)
IsManagedCluster(c *v1alpha1.Cluster) bool
GetDistribution() map[string]int
}

type ClusterSharding struct {
Shard int
Replicas int
Shards map[string]int
Clusters map[string]*v1alpha1.Cluster
lock sync.RWMutex
getClusterShard DistributionFunction
}

func NewClusterSharding(db db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
clusterSharding := &ClusterSharding{
Shard: shard,
Replicas: replicas,
Shards: make(map[string]int),
Clusters: make(map[string]*v1alpha1.Cluster),
}
distributionFunction := NoShardingDistributionFunction()
if replicas > 1 {
log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
distributionFunction = GetDistributionFunction(clusterSharding.GetClusterAccessor(), shardingAlgorithm, replicas)
} else {
log.Info("Processing all cluster shards")
}
clusterSharding.getClusterShard = distributionFunction
return clusterSharding
}

// IsManagedCluster returns wheter or not the cluster should be processed by a given shard.
func (s *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
s.lock.RLock()
defer s.lock.RUnlock()
if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
return true
}
clusterShard := 0
if shard, ok := s.Shards[c.Server]; ok {
clusterShard = shard
} else {
log.Warnf("The cluster %s has no assigned shard.", c.Server)
}
log.Debugf("Checking if cluster %s with clusterShard %d should be processed by shard %d", c.Server, clusterShard, s.Shard)
return clusterShard == s.Shard
}

func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList) {
sharding.lock.Lock()
defer sharding.lock.Unlock()
newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
for _, c := range clusters.Items {
newClusters[c.Server] = &c
}
sharding.Clusters = newClusters
sharding.updateDistribution()
}

func (sharding *ClusterSharding) Add(c *v1alpha1.Cluster) {
sharding.lock.Lock()
defer sharding.lock.Unlock()

old, ok := sharding.Clusters[c.Server]
sharding.Clusters[c.Server] = c
if !ok || hasShardingUpdates(old, c) {
sharding.updateDistribution()
} else {
log.Debugf("Skipping sharding distribution update. Cluster already added")
}
}

func (sharding *ClusterSharding) Delete(clusterServer string) {
sharding.lock.Lock()
defer sharding.lock.Unlock()
if _, ok := sharding.Clusters[clusterServer]; ok {
delete(sharding.Clusters, clusterServer)
delete(sharding.Shards, clusterServer)
sharding.updateDistribution()
}
}

func (sharding *ClusterSharding) Update(c *v1alpha1.Cluster) {
sharding.lock.Lock()
defer sharding.lock.Unlock()

old, ok := sharding.Clusters[c.Server]
sharding.Clusters[c.Server] = c
if !ok || hasShardingUpdates(old, c) {
sharding.updateDistribution()
} else {
log.Debugf("Skipping sharding distribution update. No relevant changes")
}
}

func (sharding *ClusterSharding) GetDistribution() map[string]int {
sharding.lock.RLock()
shards := sharding.Shards
sharding.lock.RUnlock()

distribution := make(map[string]int, len(shards))
for k, v := range shards {
distribution[k] = v
}
return distribution
}

func (sharding *ClusterSharding) updateDistribution() {
log.Info("Updating cluster shards")

for _, c := range sharding.Clusters {
shard := 0
if c.Shard != nil {
requestedShard := int(*c.Shard)
if requestedShard < sharding.Replicas {
shard = requestedShard
} else {
log.Warnf("Specified cluster shard (%d) for cluster: %s is greater than the number of available shard (%d). Using shard 0.", requestedShard, c.Server, sharding.Replicas)
}
} else {
shard = sharding.getClusterShard(c)
}
var shard64 int64 = int64(shard)
c.Shard = &shard64
sharding.Shards[c.Server] = shard
}
}

// hasShardingUpdates returns true if the sharding distribution has been updated.
// nil checking is done for the corner case of the in-cluster cluster which may
// have a nil shard assigned
func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
if old == nil || new == nil || (old.Shard == nil && new.Shard == nil) {
return false
}
return old.Shard != new.Shard
}

func (d *ClusterSharding) GetClusterAccessor() clusterAccessor {
return func() []*v1alpha1.Cluster {
clusters := make([]*v1alpha1.Cluster, 0, len(d.Clusters))
for _, c := range d.Clusters {
clusters = append(clusters, c)
}
return clusters
}
}
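A hedged usage sketch of the new cache, assuming the argo-cd module at this revision: build the cache for one shard, seed it with Init, keep it current with Add, and query IsManagedCluster (the same call the controller and live-state cache now make).

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/common"
	"github.com/argoproj/argo-cd/v2/controller/sharding"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
)

func main() {
	// Shard 1 of a 2-replica controller, using the round-robin algorithm.
	cache := sharding.NewClusterSharding(&dbmocks.ArgoDB{}, 1, 2, common.RoundRobinShardingAlgorithm)
	cache.Init(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
		{ID: "1", Server: "https://cluster-a"},
		{ID: "2", Server: "https://cluster-b"},
	}})

	// A new cluster secret shows up later; Add keeps the distribution current.
	cache.Add(&v1alpha1.Cluster{ID: "3", Server: "https://cluster-c"})

	for _, server := range []string{"https://cluster-a", "https://cluster-b", "https://cluster-c"} {
		fmt.Printf("%s managed by this shard: %v\n", server, cache.IsManagedCluster(&v1alpha1.Cluster{Server: server}))
	}
}
```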
@@ -40,6 +40,7 @@ const ShardControllerMappingKey = "shardControllerMapping"

type DistributionFunction func(c *v1alpha1.Cluster) int
type ClusterFilterFunction func(c *v1alpha1.Cluster) bool
+type clusterAccessor func() []*v1alpha1.Cluster

// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap.
// It also stores the heartbeat of last synced time of the application controller.

@@ -53,8 +54,7 @@ type shardApplicationControllerMapping struct {
// and returns wheter or not the cluster should be processed by a given shard. It calls the distributionFunction
// to determine which shard will process the cluster, and if the given shard is equal to the calculated shard
// the function will return true.
-func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, shard int) ClusterFilterFunction {
-replicas := db.GetApplicationControllerReplicas()
+func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, replicas, shard int) ClusterFilterFunction {
return func(c *v1alpha1.Cluster) bool {
clusterShard := 0
if c != nil && c.Shard != nil {

@@ -73,14 +73,14 @@ func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, s

// GetDistributionFunction returns which DistributionFunction should be used based on the passed algorithm and
// the current datas.
-func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) DistributionFunction {
-log.Infof("Using filter function: %s", shardingAlgorithm)
-distributionFunction := LegacyDistributionFunction(db)
+func GetDistributionFunction(clusters clusterAccessor, shardingAlgorithm string, replicasCount int) DistributionFunction {
+log.Debugf("Using filter function: %s", shardingAlgorithm)
+distributionFunction := LegacyDistributionFunction(replicasCount)
switch shardingAlgorithm {
case common.RoundRobinShardingAlgorithm:
-distributionFunction = RoundRobinDistributionFunction(db)
+distributionFunction = RoundRobinDistributionFunction(clusters, replicasCount)
case common.LegacyShardingAlgorithm:
-distributionFunction = LegacyDistributionFunction(db)
+distributionFunction = LegacyDistributionFunction(replicasCount)
default:
log.Warnf("distribution type %s is not supported, defaulting to %s", shardingAlgorithm, common.DefaultShardingAlgorithm)
}

@@ -92,15 +92,21 @@ func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) Distributio
// is lightweight and can be distributed easily, however, it does not ensure an homogenous distribution as
// some shards may get assigned more clusters than others. It is the legacy function distribution that is
// kept for compatibility reasons
-func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
-replicas := db.GetApplicationControllerReplicas()
+func LegacyDistributionFunction(replicas int) DistributionFunction {
return func(c *v1alpha1.Cluster) int {
if replicas == 0 {
+log.Debugf("Replicas count is : %d, returning -1", replicas)
return -1
}
+if c == nil {
+log.Debug("In-cluster: returning 0")
+return 0
+}
+// if Shard is manually set and the assigned value is lower than the number of replicas,
+// then its value is returned otherwise it is the default calculated value
+if c.Shard != nil && int(*c.Shard) < replicas {
+return int(*c.Shard)
+}
id := c.ID
log.Debugf("Calculating cluster shard for cluster id: %s", id)
if id == "" {

@@ -121,14 +127,19 @@ func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
// This function ensures an homogenous distribution: each shards got assigned the same number of
// clusters +/-1 , but with the drawback of a reshuffling of clusters accross shards in case of some changes
// in the cluster list
-func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
-replicas := db.GetApplicationControllerReplicas()

+func RoundRobinDistributionFunction(clusters clusterAccessor, replicas int) DistributionFunction {
return func(c *v1alpha1.Cluster) int {
if replicas > 0 {
if c == nil { // in-cluster does not necessarly have a secret assigned. So we are receiving a nil cluster here.
return 0
}
+// if Shard is manually set and the assigned value is lower than the number of replicas,
+// then its value is returned otherwise it is the default calculated value
+if c.Shard != nil && int(*c.Shard) < replicas {
+return int(*c.Shard)
+} else {
-clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(db)
+clusterIndexdByClusterIdMap := createClusterIndexByClusterIdMap(clusters)
clusterIndex, ok := clusterIndexdByClusterIdMap[c.ID]
if !ok {
log.Warnf("Cluster with id=%s not found in cluster map.", c.ID)

@@ -144,6 +155,12 @@ func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
}
}

+// NoShardingDistributionFunction returns a DistributionFunction that will process all cluster by shard 0
+// the function is created for API compatibility purposes and is not supposed to be activated.
+func NoShardingDistributionFunction() DistributionFunction {
+return func(c *v1alpha1.Cluster) int { return 0 }
+}
+
// InferShard extracts the shard index based on its hostname.
func InferShard() (int, error) {
hostname, err := osHostnameFunction()

@@ -152,33 +169,29 @@ func InferShard() (int, error) {
}
parts := strings.Split(hostname, "-")
if len(parts) == 0 {
-return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
+log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
+return 0, nil
}
shard, err := strconv.Atoi(parts[len(parts)-1])
if err != nil {
-return 0, fmt.Errorf("hostname should ends with shard number separated by '-' but got: %s", hostname)
+log.Warnf("hostname should end with shard number separated by '-' but got: %s", hostname)
+return 0, nil
}
return int(shard), nil
}

-func getSortedClustersList(db db.ArgoDB) []v1alpha1.Cluster {
-ctx := context.Background()
-clustersList, dbErr := db.ListClusters(ctx)
-if dbErr != nil {
-log.Warnf("Error while querying clusters list from database: %v", dbErr)
-return []v1alpha1.Cluster{}
-}
-clusters := clustersList.Items
+func getSortedClustersList(getCluster clusterAccessor) []*v1alpha1.Cluster {
+clusters := getCluster()
sort.Slice(clusters, func(i, j int) bool {
return clusters[i].ID < clusters[j].ID
})
return clusters
}

-func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
-clusters := getSortedClustersList(db)
+func createClusterIndexByClusterIdMap(getCluster clusterAccessor) map[string]int {
+clusters := getSortedClustersList(getCluster)
log.Debugf("ClustersList has %d items", len(clusters))
-clusterById := make(map[string]v1alpha1.Cluster)
+clusterById := make(map[string]*v1alpha1.Cluster)
clusterIndexedByClusterId := make(map[string]int)
for i, cluster := range clusters {
log.Debugf("Adding cluster with id=%s and name=%s to cluster's map", cluster.ID, cluster.Name)

@@ -194,7 +207,6 @@ func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
// If the shard value passed to this function is -1, that is, the shard was not set as an environment variable,
// we default the shard number to 0 for computing the default config map.
func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
hostname, err := osHostnameFunction()
if err != nil {
return -1, err
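The round-robin function now takes a clusterAccessor and the replica count, honors a manually pinned Shard when it is below the replica count, and otherwise places a cluster by its position in the ID-sorted list (the final modulo falls outside the captured hunk, so treat this as an approximation of the shown logic). A stdlib-only sketch of that placement rule:

```go
package main

import (
	"fmt"
	"sort"
)

type cluster struct {
	id    string
	shard *int64 // optional manual pin, like v1alpha1.Cluster.Shard
}

// roundRobinShard approximates RoundRobinDistributionFunction above: honor a
// manual shard when it is below the replica count, otherwise place the cluster
// by its index in the ID-sorted list, modulo the number of replicas.
func roundRobinShard(clusters []cluster, c cluster, replicas int) int {
	if replicas <= 0 {
		return -1
	}
	if c.shard != nil && int(*c.shard) < replicas {
		return int(*c.shard)
	}
	sorted := append([]cluster(nil), clusters...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].id < sorted[j].id })
	for i, cl := range sorted {
		if cl.id == c.id {
			return i % replicas
		}
	}
	return -1 // unknown cluster
}

func main() {
	pinned := int64(1)
	clusters := []cluster{{id: "1"}, {id: "2"}, {id: "3"}, {id: "4", shard: &pinned}}
	for _, c := range clusters {
		fmt.Printf("cluster %s -> shard %d\n", c.id, roundRobinShard(clusters, c, 2))
	}
}
```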
@@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -19,18 +20,20 @@ import (
|
||||
|
||||
func TestGetShardByID_NotEmptyID(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(replicasCount)(&v1alpha1.Cluster{ID: "4"}))
|
||||
}
|
||||
|
||||
func TestGetShardByID_EmptyID(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
replicasCount := 1
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(replicasCount)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, 0, shard)
|
||||
}
|
||||
|
||||
@@ -38,7 +41,7 @@ func TestGetShardByID_NoReplicas(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(0)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
@@ -46,16 +49,16 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunction(t *testing.T) {
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(0)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
distributionFunction := RoundRobinDistributionFunction(clusters, 0)
|
||||
assert.Equal(t, -1, distributionFunction(nil))
|
||||
assert.Equal(t, -1, distributionFunction(&cluster1))
|
||||
assert.Equal(t, -1, distributionFunction(&cluster2))
|
||||
@@ -65,137 +68,112 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *tes
|
||||
}
|
||||
|
||||
func TestGetClusterFilterDefault(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
clusterAccessor, _, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 2
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterLegacy(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
//shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterUnknown(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "unknown")
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, "unknown"), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
replicasCount := 2
|
||||
db.On("GetApplicationControllerReplicas").Return(replicasCount)
|
||||
distributionFunction := GetDistributionFunction(clusterAccessor, "unknown", replicasCount)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster2))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster3))
|
||||
assert.Equal(t, 1, distributionFunction(&cluster4))
|
||||
}
|
||||
|
||||
func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
    shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
    db := &dbmocks.ArgoDB{}
    db.On("GetApplicationControllerReplicas").Return(2)
    filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
    assert.False(t, filter(nil))
    assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
    assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
    assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
    assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
    //shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
    t.Setenv(common.EnvControllerReplicas, "5")
    clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
    replicasCount := 5
    db.On("GetApplicationControllerReplicas").Return(replicasCount)
    filter := GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
    assert.Equal(t, 0, filter(nil))
    assert.Equal(t, 4, filter(&cluster1))
    assert.Equal(t, 1, filter(&cluster2))
    assert.Equal(t, 2, filter(&cluster3))
    assert.Equal(t, 2, filter(&cluster4))

    var fixedShard int64 = 4
    filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
    assert.False(t, filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
    cluster5 := &v1alpha1.Cluster{ID: "5", Shard: &fixedShard}
    clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
    filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
    assert.Equal(t, int(fixedShard), filter(cluster5))

    fixedShard = 1
    filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
    assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
    cluster5.Shard = &fixedShard
    clusterAccessor = getClusterAccessor([]v1alpha1.Cluster{cluster1, cluster2, cluster2, cluster4, *cluster5})
    filter = GetDistributionFunction(clusterAccessor, common.DefaultShardingAlgorithm, replicasCount)
    assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
}

func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
    shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
    db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
    db.On("GetApplicationControllerReplicas").Return(2)
    filter := GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex)
    assert.False(t, filter(nil))
    assert.False(t, filter(&cluster1))
    assert.True(t, filter(&cluster2))
    assert.False(t, filter(&cluster3))
    assert.True(t, filter(&cluster4))
    //shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
    t.Setenv(common.EnvControllerReplicas, "4")
    clusterAccessor, db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
    replicasCount := 4
    db.On("GetApplicationControllerReplicas").Return(replicasCount)

    filter := GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
    assert.Equal(t, filter(nil), 0)
    assert.Equal(t, filter(&cluster1), 0)
    assert.Equal(t, filter(&cluster2), 1)
    assert.Equal(t, filter(&cluster3), 2)
    assert.Equal(t, filter(&cluster4), 3)

    // a cluster with a fixed shard should be processed by the specified exact
    // same shard unless the specified shard index is greater than the number of replicas.
    var fixedShard int64 = 4
    filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
    assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
    var fixedShard int64 = 1
    cluster5 := v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
    clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
    clusterAccessor = getClusterAccessor(clusters)
    filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
    assert.Equal(t, int(fixedShard), filter(&cluster5))

    fixedShard = 1
    filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
    assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
}

func TestGetClusterFilterLegacyHash(t *testing.T) {
    shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
    t.Setenv(common.EnvControllerShardingAlgorithm, "hash")
    db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
    db.On("GetApplicationControllerReplicas").Return(2)
    filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
    assert.False(t, filter(&cluster1))
    assert.True(t, filter(&cluster2))
    assert.False(t, filter(&cluster3))
    assert.True(t, filter(&cluster4))

    // a cluster with a fixed shard should be processed by the specified exact
    // same shard unless the specified shard index is greater than the number of replicas.
    var fixedShard int64 = 4
    filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
    assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))

    fixedShard = 1
    filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
    assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
}

func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) {
    db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
    shardIndex := 1
    db.On("GetApplicationControllerReplicas").Return(2)

    t.Run("legacy", func(t *testing.T) {
        t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
        shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
        assert.False(t, shardShouldProcessCluster(&cluster1))
        assert.True(t, shardShouldProcessCluster(&cluster2))
        assert.False(t, shardShouldProcessCluster(&cluster3))
        assert.True(t, shardShouldProcessCluster(&cluster4))
        assert.False(t, shardShouldProcessCluster(nil))
    })

    t.Run("roundrobin", func(t *testing.T) {
        t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
        shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
        assert.False(t, shardShouldProcessCluster(&cluster1))
        assert.True(t, shardShouldProcessCluster(&cluster2))
        assert.False(t, shardShouldProcessCluster(&cluster3))
        assert.True(t, shardShouldProcessCluster(&cluster4))
        assert.False(t, shardShouldProcessCluster(nil))
    })
    cluster5 = v1alpha1.Cluster{Name: "cluster5", ID: "5", Shard: &fixedShard}
    clusters = []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
    clusterAccessor = getClusterAccessor(clusters)
    filter = GetDistributionFunction(clusterAccessor, common.RoundRobinShardingAlgorithm, replicasCount)
    assert.Equal(t, int(fixedShard), filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
}

func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
    db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
    clusters, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()

    t.Run("replicas set to 1", func(t *testing.T) {
        db.On("GetApplicationControllerReplicas").Return(1).Once()
        distributionFunction := RoundRobinDistributionFunction(db)
        replicasCount := 1
        db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
        distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
        assert.Equal(t, 0, distributionFunction(nil))
        assert.Equal(t, 0, distributionFunction(&cluster1))
        assert.Equal(t, 0, distributionFunction(&cluster2))
@@ -205,8 +183,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
    })

    t.Run("replicas set to 2", func(t *testing.T) {
        db.On("GetApplicationControllerReplicas").Return(2).Once()
        distributionFunction := RoundRobinDistributionFunction(db)
        replicasCount := 2
        db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
        distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
        assert.Equal(t, 0, distributionFunction(nil))
        assert.Equal(t, 0, distributionFunction(&cluster1))
        assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -216,8 +195,9 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
    })

    t.Run("replicas set to 3", func(t *testing.T) {
        db.On("GetApplicationControllerReplicas").Return(3).Once()
        distributionFunction := RoundRobinDistributionFunction(db)
        replicasCount := 3
        db.On("GetApplicationControllerReplicas").Return(replicasCount).Once()
        distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)
        assert.Equal(t, 0, distributionFunction(nil))
        assert.Equal(t, 0, distributionFunction(&cluster1))
        assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -233,17 +213,19 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumber
    // Initial tests where showing that under 1024 clusters, execution time was around 400ms
    // and for 4096 clusters, execution time was under 9s
    // The other implementation was giving almost linear time of 400ms up to 10'000 clusters
    db := dbmocks.ArgoDB{}
    clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{}}
    clusterPointers := []*v1alpha1.Cluster{}
    for i := 0; i < 2048; i++ {
        cluster := createCluster(fmt.Sprintf("cluster-%d", i), fmt.Sprintf("%d", i))
        clusterList.Items = append(clusterList.Items, cluster)
        clusterPointers = append(clusterPointers, &cluster)
    }
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)
    db.On("GetApplicationControllerReplicas").Return(2)
    distributionFunction := RoundRobinDistributionFunction(&db)
    for i, c := range clusterList.Items {
        assert.Equal(t, i%2, distributionFunction(&c))
    replicasCount := 2
    t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
    _, db, _, _, _, _, _ := createTestClusters()
    clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
    db.On("GetApplicationControllerReplicas").Return(replicasCount)
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    for i, c := range clusterPointers {
        assert.Equal(t, i%2, distributionFunction(c))
    }
}

@@ -256,12 +238,15 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
    cluster5 := createCluster("cluster5", "5")
    cluster6 := createCluster("cluster6", "6")

    clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}
    clusterAccessor := getClusterAccessor(clusters)

    clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}}
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)

    // Test with replicas set to 2
    db.On("GetApplicationControllerReplicas").Return(2)
    distributionFunction := RoundRobinDistributionFunction(&db)
    replicasCount := 2
    db.On("GetApplicationControllerReplicas").Return(replicasCount)
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    assert.Equal(t, 0, distributionFunction(nil))
    assert.Equal(t, 0, distributionFunction(&cluster1))
    assert.Equal(t, 1, distributionFunction(&cluster2))
@@ -272,17 +257,20 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde

    // Now, the database knows cluster6. Shard should be assigned a proper shard
    clusterList.Items = append(clusterList.Items, cluster6)
    distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
    assert.Equal(t, 1, distributionFunction(&cluster6))

    // Now, we remove the last added cluster, it should be unassigned as well
    clusterList.Items = clusterList.Items[:len(clusterList.Items)-1]
    distributionFunction = RoundRobinDistributionFunction(getClusterAccessor(clusterList.Items), replicasCount)
    assert.Equal(t, -1, distributionFunction(&cluster6))
}

func TestGetShardByIndexModuloReplicasCountDistributionFunction(t *testing.T) {
    db, cluster1, cluster2, _, _, _ := createTestClusters()
    db.On("GetApplicationControllerReplicas").Return(2)
    distributionFunction := RoundRobinDistributionFunction(db)
    clusters, db, cluster1, cluster2, _, _, _ := createTestClusters()
    replicasCount := 2
    db.On("GetApplicationControllerReplicas").Return(replicasCount)
    distributionFunction := RoundRobinDistributionFunction(clusters, replicasCount)

    // Test that the function returns the correct shard for cluster1 and cluster2
    expectedShardForCluster1 := 0
@@ -315,14 +303,14 @@ func TestInferShard(t *testing.T) {

    osHostnameFunction = func() (string, error) { return "exampleshard", nil }
    _, err = InferShard()
    assert.NotNil(t, err)
    assert.Nil(t, err)

    osHostnameFunction = func() (string, error) { return "example-shard", nil }
    _, err = InferShard()
    assert.NotNil(t, err)
    assert.Nil(t, err)
}

func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
func createTestClusters() (clusterAccessor, *dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster, v1alpha1.Cluster) {
    db := dbmocks.ArgoDB{}
    cluster1 := createCluster("cluster1", "1")
    cluster2 := createCluster("cluster2", "2")
@@ -330,10 +318,27 @@ func createTestClusters() (*dbmocks.ArgoDB, v1alpha1.Cluster, v1alpha1.Cluster,
    cluster4 := createCluster("cluster4", "4")
    cluster5 := createCluster("cluster5", "5")

    clusters := []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}

    db.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
        cluster1, cluster2, cluster3, cluster4, cluster5,
    }}, nil)
    return &db, cluster1, cluster2, cluster3, cluster4, cluster5
    return getClusterAccessor(clusters), &db, cluster1, cluster2, cluster3, cluster4, cluster5
}

func getClusterAccessor(clusters []v1alpha1.Cluster) clusterAccessor {
    // Convert the array to a slice of pointers
    clusterPointers := getClusterPointers(clusters)
    clusterAccessor := func() []*v1alpha1.Cluster { return clusterPointers }
    return clusterAccessor
}

func getClusterPointers(clusters []v1alpha1.Cluster) []*v1alpha1.Cluster {
    var clusterPointers []*v1alpha1.Cluster
    for i := range clusters {
        clusterPointers = append(clusterPointers, &clusters[i])
    }
    return clusterPointers
}

func createCluster(name string, id string) v1alpha1.Cluster {

@@ -3,6 +3,7 @@ package sharding
import (
    "fmt"
    "math"
    "strconv"
    "testing"

    "github.com/argoproj/argo-cd/v2/common"
@@ -22,9 +23,11 @@ func TestLargeShuffle(t *testing.T) {
        clusterList.Items = append(clusterList.Items, cluster)
    }
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)
    clusterAccessor := getClusterAccessor(clusterList.Items)
    // Test with replicas set to 256
    t.Setenv(common.EnvControllerReplicas, "256")
    distributionFunction := RoundRobinDistributionFunction(&db)
    replicasCount := 256
    t.Setenv(common.EnvControllerReplicas, strconv.Itoa(replicasCount))
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    for i, c := range clusterList.Items {
        assert.Equal(t, i%2567, distributionFunction(&c))
    }
@@ -44,10 +47,11 @@ func TestShuffle(t *testing.T) {

    clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5, cluster6}}
    db.On("ListClusters", mock.Anything).Return(clusterList, nil)

    clusterAccessor := getClusterAccessor(clusterList.Items)
    // Test with replicas set to 3
    t.Setenv(common.EnvControllerReplicas, "3")
    distributionFunction := RoundRobinDistributionFunction(&db)
    replicasCount := 3
    distributionFunction := RoundRobinDistributionFunction(clusterAccessor, replicasCount)
    assert.Equal(t, 0, distributionFunction(nil))
    assert.Equal(t, 0, distributionFunction(&cluster1))
    assert.Equal(t, 1, distributionFunction(&cluster2))

@@ -67,7 +67,7 @@ metadata:
  name: argocd-cmd-params-cm
data:
  application.namespaces: app-team-one, app-team-two
  notificationscontroller.selfservice.enabled: true
  notificationscontroller.selfservice.enabled: "true"
```

To use this feature, you can deploy a ConfigMap named `argocd-notifications-cm`, and optionally a Secret named `argocd-notifications-secret`, in the namespace where the Argo CD Application lives.

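Purely as an illustration (the namespace, service name, and template below are hypothetical and not taken from this change), a minimal self-service `argocd-notifications-cm` in the Application's namespace could look roughly like this:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-notifications-cm
  namespace: app-team-one        # hypothetical Application namespace
data:
  # hypothetical webhook service; any supported notification service follows the same pattern
  service.webhook.example: |
    url: https://example.invalid/hook
  template.app-synced: |
    webhook:
      example:
        method: POST
        body: "{{.app.metadata.name}} synced"
  trigger.on-sync-succeeded: |
    - when: app.status.operationState.phase in ['Succeeded']
      send: [app-synced]
```
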
@@ -26,6 +26,7 @@ argocd-server [flags]

```
      --address string                        Listen on given address (default "0.0.0.0")
      --api-content-types string              Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty. (default "application/json")
      --app-state-cache-expiration duration   Cache expiration for app state (default 1h0m0s)
      --application-namespaces strings        List of additional namespaces where application resources can be managed in
      --as string                             Username to impersonate for the operation

@@ -1,6 +1,5 @@
| Argo CD version | Kubernetes versions        |
|-----------------|----------------------------|
| 2.7             | v1.26, v1.25, v1.24, v1.23 |
| 2.6             | v1.24, v1.23, v1.22        |
| 2.5             | v1.24, v1.23, v1.22        |
| 2.10            | v1.28, v1.27, v1.26, v1.25 |
| 2.9             | v1.28, v1.27, v1.26, v1.25 |
| 2.8             | v1.27, v1.26, v1.25, v1.24 |

@@ -62,6 +62,6 @@ argocd admin cluster namespaces my-cluster
* [argocd admin cluster generate-spec](argocd_admin_cluster_generate-spec.md) - Generate declarative config for a cluster
* [argocd admin cluster kubeconfig](argocd_admin_cluster_kubeconfig.md) - Generates kubeconfig for the specified cluster
* [argocd admin cluster namespaces](argocd_admin_cluster_namespaces.md) - Print information namespaces which Argo CD manages in each cluster.
* [argocd admin cluster shards](argocd_admin_cluster_shards.md) - Print information about each controller shard and portion of Kubernetes resources it is responsible for.
* [argocd admin cluster shards](argocd_admin_cluster_shards.md) - Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.
* [argocd admin cluster stats](argocd_admin_cluster_stats.md) - Prints information cluster statistics and inferred shard number

@@ -2,7 +2,7 @@

## argocd admin cluster shards

Print information about each controller shard and portion of Kubernetes resources it is responsible for.
Print information about each controller shard and the estimated portion of Kubernetes resources it is responsible for.

```
argocd admin cluster shards [flags]
@@ -43,6 +43,7 @@ argocd admin cluster shards [flags]
      --sentinelmaster string    Redis sentinel master group name. (default "master")
      --server string            The address and port of the Kubernetes API server
      --shard int                Cluster shard filter (default -1)
      --sharding-method string   Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] (default "legacy")
      --tls-server-name string   If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string             Bearer token for authentication to the API server
      --user string              The name of the kubeconfig user to use

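For reference, the sharding method selected by the `--sharding-method` flag above can also be set controller-wide. A minimal sketch, assuming the `controller.sharding.algorithm` key of `argocd-cmd-params-cm` (that key is an assumption here and does not appear in the hunks of this change):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
data:
  # assumed key name; switches cluster distribution from the default
  # "legacy" hashing to round-robin across application controller shards
  controller.sharding.algorithm: round-robin
```
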
@@ -57,6 +57,7 @@ argocd admin cluster stats target-cluster
      --sentinelmaster string    Redis sentinel master group name. (default "master")
      --server string            The address and port of the Kubernetes API server
      --shard int                Cluster shard filter (default -1)
      --sharding-method string   Sharding method. Defaults: legacy. Supported sharding methods are : [legacy, round-robin] (default "legacy")
      --tls-server-name string   If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
      --token string             Bearer token for authentication to the API server
      --user string              The name of the kubeconfig user to use

@@ -56,7 +56,13 @@ Application.
Add the following entry in the argocd-cmd-params-cm configmap:

```
controller.diff.server.side: "true"
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
data:
  controller.diff.server.side: "true"
...
```

Note: It is necessary to restart the `argocd-application-controller`

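For example, assuming a default installation where the controller runs as the `argocd-application-controller` StatefulSet in the `argocd` namespace, running `kubectl -n argocd rollout restart statefulset argocd-application-controller` would restart it so the new setting takes effect.
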
21
go.mod
21
go.mod
@@ -28,7 +28,7 @@ require (
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/gfleury/go-bitbucket-v1 v0.0.0-20220301131131-8e7ed04b843e
|
||||
github.com/go-git/go-git/v5 v5.8.1
|
||||
github.com/go-git/go-git/v5 v5.11.0
|
||||
github.com/go-logr/logr v1.3.0
|
||||
github.com/go-openapi/loads v0.21.2
|
||||
github.com/go-openapi/runtime v0.26.0
|
||||
@@ -80,11 +80,11 @@ require (
|
||||
go.opentelemetry.io/otel v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
|
||||
go.opentelemetry.io/otel/sdk v1.21.0
|
||||
golang.org/x/crypto v0.14.0
|
||||
golang.org/x/crypto v0.16.0
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
|
||||
golang.org/x/oauth2 v0.11.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/term v0.13.0
|
||||
golang.org/x/term v0.15.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d
|
||||
google.golang.org/grpc v1.59.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
@@ -159,9 +159,8 @@ require (
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/PagerDuty/go-pagerduty v1.7.0 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
|
||||
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 // indirect
|
||||
github.com/acomagu/bufpipe v1.0.4 // indirect
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
@@ -184,7 +183,7 @@ require (
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.4.1 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.5.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.21.4 // indirect
|
||||
@@ -251,7 +250,7 @@ require (
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sergi/go-diff v1.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.0 // indirect
|
||||
github.com/skeema/knownhosts v1.2.1 // indirect
|
||||
github.com/slack-go/slack v0.12.2 // indirect
|
||||
github.com/spf13/cast v1.5.1 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
@@ -268,11 +267,11 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.17.0
|
||||
golang.org/x/sys v0.14.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/net v0.19.0
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0
|
||||
golang.org/x/tools v0.12.0 // indirect
|
||||
golang.org/x/tools v0.13.0 // indirect
|
||||
gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
gomodules.xyz/notify v0.1.1 // indirect
|
||||
|
||||
51
go.sum
51
go.sum
@@ -657,8 +657,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/PagerDuty/go-pagerduty v1.7.0 h1:S1NcMKECxT5hJwV4VT+QzeSsSiv4oWl1s2821dUqG/8=
|
||||
github.com/PagerDuty/go-pagerduty v1.7.0/go.mod h1:PuFyJKRz1liIAH4h5KVXVD18Obpp1ZXRdxHvmGXooro=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 h1:prBTRx78AQnXzivNT9Crhu564W/zPPr3ibSlpT9xKcE=
|
||||
@@ -668,8 +668,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
|
||||
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d h1:WtAMR0fPCOfK7TPGZ8ZpLLY18HRvL7XJ3xcs0wnREgo=
|
||||
github.com/TomOnTime/utfutil v0.0.0-20180511104225-09c41003ee1d/go.mod h1:WML6KOYjeU8N6YyusMjj2qRvaPNUEvrQvaxuFcMRFJY=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
|
||||
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
|
||||
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
|
||||
@@ -850,8 +848,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0=
|
||||
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
|
||||
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
@@ -925,12 +923,12 @@ github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H
|
||||
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
|
||||
github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
|
||||
github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A=
|
||||
github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo=
|
||||
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
|
||||
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
|
||||
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
|
||||
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -1377,8 +1375,6 @@ github.com/malexdev/utfutil v0.0.0-20180510171754-00c8d4a8e7a8 h1:A6SLdFpRzUUF5v
|
||||
github.com/malexdev/utfutil v0.0.0-20180510171754-00c8d4a8e7a8/go.mod h1:UtpLyb/EupVKXF/N0b4NRe1DNg+QYJsnsHQ038romhM=
|
||||
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
|
||||
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
@@ -1498,8 +1494,9 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
|
||||
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
|
||||
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
|
||||
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
|
||||
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
|
||||
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
@@ -1625,8 +1622,8 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
|
||||
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
|
||||
github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
|
||||
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
|
||||
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ=
|
||||
@@ -1815,8 +1812,9 @@ golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0
|
||||
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
|
||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1961,8 +1959,9 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -2134,8 +2133,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -2149,8 +2148,9 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -2169,8 +2169,9 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -2260,8 +2261,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
|
||||
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
|
||||
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -36,3 +36,11 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.10.0-rc2
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
@@ -20582,6 +20582,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -21018,7 +21026,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -21342,7 +21350,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -21394,7 +21402,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -21649,7 +21657,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.10.0-rc2
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.10.0-rc2
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
@@ -20618,6 +20618,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -22275,7 +22283,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -22398,7 +22406,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -22480,7 +22488,7 @@ spec:
|
||||
key: notificationscontroller.selfservice.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -22835,7 +22843,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -22887,7 +22895,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -23200,7 +23208,7 @@ spec:
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -23482,7 +23490,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -109,6 +109,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -1660,7 +1668,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1783,7 +1791,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1865,7 +1873,7 @@ spec:
|
||||
key: notificationscontroller.selfservice.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2220,7 +2228,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2272,7 +2280,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2585,7 +2593,7 @@ spec:
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2867,7 +2875,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -20609,6 +20609,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -21370,7 +21378,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -21493,7 +21501,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -21575,7 +21583,7 @@ spec:
|
||||
key: notificationscontroller.selfservice.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -21881,7 +21889,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -21933,7 +21941,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -22244,7 +22252,7 @@ spec:
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -22526,7 +22534,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -100,6 +100,14 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -755,7 +763,7 @@ spec:
|
||||
key: applicationsetcontroller.enable.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -878,7 +886,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -960,7 +968,7 @@ spec:
|
||||
key: notificationscontroller.selfservice.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1266,7 +1274,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1318,7 +1326,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -1629,7 +1637,7 @@ spec:
|
||||
key: server.k8sclient.retry.base.backoff
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1911,7 +1919,7 @@ spec:
|
||||
key: controller.diff.server.side
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.10.0-rc2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -0,0 +1,36 @@
hs = { status = "Progressing", message = "AdvancedCronJobs has active jobs" }
-- Extract lastScheduleTime and convert to time objects
lastScheduleTime = nil

if obj.status.lastScheduleTime ~= nil then
  local year, month, day, hour, min, sec = string.match(obj.status.lastScheduleTime, "(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z")
  lastScheduleTime = os.time({year=year, month=month, day=day, hour=hour, min=min, sec=sec})
end

if lastScheduleTime == nil and obj.spec.paused == true then
  hs.status = "Suspended"
  hs.message = "AdvancedCronJob is Paused"
  return hs
end

-- AdvancedCronJobs are progressing if they have any object in the "active" state
if obj.status.active ~= nil and #obj.status.active > 0 then
  hs.status = "Progressing"
  hs.message = "AdvancedCronJobs has active jobs"
  return hs
end
-- AdvancedCronJobs are Degraded if they don't have lastScheduleTime
if lastScheduleTime == nil then
  hs.status = "Degraded"
  hs.message = "AdvancedCronJobs has not run successfully"
  return hs
end
-- AdvancedCronJobs are healthy if they have lastScheduleTime
if lastScheduleTime ~= nil then
  hs.status = "Healthy"
  hs.message = "AdvancedCronJobs has run successfully"
  return hs
end

return hs
@@ -0,0 +1,17 @@
tests:
- healthStatus:
    status: Healthy
    message: AdvancedCronJobs has run successfully
  inputPath: testdata/lastScheduleTime.yaml
- healthStatus:
    status: Degraded
    message: AdvancedCronJobs has not run successfully
  inputPath: testdata/notScheduled.yaml
- healthStatus:
    status: Progressing
    message: AdvancedCronJobs has active jobs
  inputPath: testdata/activeJobs.yaml
- healthStatus:
    status: Suspended
    message: AdvancedCronJob is Paused
  inputPath: testdata/suspended.yaml
30
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml
vendored
Normal file
30
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/activeJobs.yaml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
apiVersion: apps.kruise.io/v1alpha1
|
||||
kind: AdvancedCronJob
|
||||
metadata:
|
||||
name: acj-test
|
||||
spec:
|
||||
schedule: "*/1 * * * *"
|
||||
template:
|
||||
broadcastJobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
completionPolicy:
|
||||
type: Always
|
||||
ttlSecondsAfterFinished: 30
|
||||
|
||||
status:
|
||||
active:
|
||||
- apiVersion: apps.kruise.io/v1alpha1
|
||||
kind: BroadcastJob
|
||||
name: acj-test-1694882400
|
||||
namespace: default
|
||||
resourceVersion: '4012'
|
||||
uid: 2b08a429-a43b-4382-8e5d-3db0c72b5b13
|
||||
lastScheduleTime: '2023-09-16T16:40:00Z'
|
||||
type: BroadcastJob
|
||||
23
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml
vendored
Normal file
23
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/lastScheduleTime.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
apiVersion: apps.kruise.io/v1alpha1
|
||||
kind: AdvancedCronJob
|
||||
metadata:
|
||||
name: acj-test
|
||||
spec:
|
||||
schedule: "*/1 * * * *"
|
||||
template:
|
||||
broadcastJobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
completionPolicy:
|
||||
type: Always
|
||||
ttlSecondsAfterFinished: 30
|
||||
|
||||
status:
|
||||
lastScheduleTime: "2023-09-16T16:29:00Z"
|
||||
type: BroadcastJob
|
||||
22
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml
vendored
Normal file
22
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/notScheduled.yaml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: apps.kruise.io/v1alpha1
|
||||
kind: AdvancedCronJob
|
||||
metadata:
|
||||
name: acj-test
|
||||
spec:
|
||||
schedule: "*/1 * * * *"
|
||||
template:
|
||||
broadcastJobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
completionPolicy:
|
||||
type: Always
|
||||
ttlSecondsAfterFinished: 30
|
||||
|
||||
status:
|
||||
lastScheduleTime: null
|
||||
23
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml
vendored
Normal file
23
resource_customizations/apps.kruise.io/AdvancedCronJob/testdata/suspended.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
apiVersion: apps.kruise.io/v1alpha1
|
||||
kind: AdvancedCronJob
|
||||
metadata:
|
||||
name: acj-test
|
||||
spec:
|
||||
schedule: "*/1 * * * *"
|
||||
template:
|
||||
broadcastJobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
completionPolicy:
|
||||
type: Always
|
||||
ttlSecondsAfterFinished: 30
|
||||
paused: true
|
||||
|
||||
status:
|
||||
type: BroadcastJob
|
||||
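
Note: these health scripts can be exercised outside Argo CD with a stock Lua 5.x interpreter, which is convenient when iterating on a check before running the health_test.yaml fixtures. A minimal sketch, assuming the AdvancedCronJob health.lua above is saved locally and that a plain dofile is acceptable (inside Argo CD the script runs in a sandboxed Lua VM with obj injected from the live resource); the mocked obj mirrors testdata/lastScheduleTime.yaml:

    -- harness.lua (hypothetical helper, not part of the change set)
    obj = {
      spec = { schedule = "*/1 * * * *" },
      status = { lastScheduleTime = "2023-09-16T16:29:00Z", type = "BroadcastJob" },
    }
    local hs = dofile("health.lua")  -- the script ends with `return hs`
    print(hs.status, hs.message)     -- expected per the test above: Healthy / "AdvancedCronJobs has run successfully"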
@@ -0,0 +1,32 @@
hs = { status = "Progressing", message = "BroadcastJob is still running" }

if obj.status ~= nil then

  -- BroadcastJob is healthy if the desired number and the succeeded number are equal
  if obj.status.desired == obj.status.succeeded and obj.status.phase == "completed" then
    hs.status = "Healthy"
    hs.message = "BroadcastJob is completed successfully"
    return hs
  end
  -- BroadcastJob is progressing if active is not equal to 0
  if obj.status.active ~= 0 and obj.status.phase == "running" then
    hs.status = "Progressing"
    hs.message = "BroadcastJob is still running"
    return hs
  end
  -- BroadcastJob is degraded if failed is not equal to 0
  if obj.status.failed ~= 0 and obj.status.phase == "failed" then
    hs.status = "Degraded"
    hs.message = "BroadcastJob failed"
    return hs
  end

  if obj.status.phase == "paused" and obj.spec.paused == true then
    hs.status = "Suspended"
    hs.message = "BroadcastJob is Paused"
    return hs
  end

end

return hs

@@ -0,0 +1,17 @@
tests:
- healthStatus:
    status: Healthy
    message: "BroadcastJob is completed successfully"
  inputPath: testdata/succeeded.yaml
- healthStatus:
    status: Degraded
    message: "BroadcastJob failed"
  inputPath: testdata/failed.yaml
- healthStatus:
    status: Progressing
    message: "BroadcastJob is still running"
  inputPath: testdata/running.yaml
- healthStatus:
    status: Suspended
    message: "BroadcastJob is Paused"
  inputPath: testdata/suspended.yaml

31  resource_customizations/apps.kruise.io/BroadcastJob/testdata/failed.yaml  vendored  Normal file
@@ -0,0 +1,31 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: failed-job
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["exit", "1"] # a dummy command to fail
      restartPolicy: Never
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds

status:
  active: 0
  completionTime: '2023-09-17T14:31:38Z'
  conditions:
    - lastProbeTime: '2023-09-17T14:31:38Z'
      lastTransitionTime: '2023-09-17T14:31:38Z'
      message: failure policy is FailurePolicyTypeFailFast and failed pod is found
      reason: Failed
      status: 'True'
      type: Failed
  desired: 1
  failed: 1
  phase: failed
  startTime: '2023-09-17T14:31:32Z'
  succeeded: 0

22  resource_customizations/apps.kruise.io/BroadcastJob/testdata/running.yaml  vendored  Normal file
@@ -0,0 +1,22 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: download-image
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["echo", "started"] # a dummy command to do nothing
      restartPolicy: Never
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds
status:
  active: 1
  desired: 1
  failed: 0
  phase: running
  startTime: '2023-09-17T14:43:30Z'
  succeeded: 0

31  resource_customizations/apps.kruise.io/BroadcastJob/testdata/succeeded.yaml  vendored  Normal file
@@ -0,0 +1,31 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: download-image
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["echo", "started"] # a dummy command to do nothing
      restartPolicy: Never
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds
status:
  active: 0
  completionTime: '2023-09-17T14:35:14Z'
  conditions:
    - lastProbeTime: '2023-09-17T14:35:14Z'
      lastTransitionTime: '2023-09-17T14:35:14Z'
      message: Job completed, 1 pods succeeded, 0 pods failed
      reason: Complete
      status: 'True'
      type: Complete
  desired: 1
  failed: 0
  phase: completed
  startTime: '2023-09-17T14:35:07Z'
  succeeded: 1

31  resource_customizations/apps.kruise.io/BroadcastJob/testdata/suspended.yaml  vendored  Normal file
@@ -0,0 +1,31 @@
apiVersion: apps.kruise.io/v1alpha1
kind: BroadcastJob
metadata:
  name: download-image
spec:
  template:
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v3
          command: ["echo", "started"] # a dummy command to do nothing
      restartPolicy: Never
  paused: true
  completionPolicy:
    type: Always
    ttlSecondsAfterFinished: 60 # the job will be deleted after 60 seconds
status:
  active: 0
  completionTime: '2023-09-17T14:35:14Z'
  conditions:
    - lastProbeTime: '2023-09-17T14:35:14Z'
      lastTransitionTime: '2023-09-17T14:35:14Z'
      message: Job completed, 1 pods succeeded, 0 pods failed
      reason: Complete
      status: 'True'
      type: Complete
  desired: 1
  failed: 0
  phase: paused
  startTime: '2023-09-17T14:35:07Z'
  succeeded: 0
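
For reference, the Degraded branch of the BroadcastJob script only fires when the failure count and the phase agree, which is exactly the shape of testdata/failed.yaml. A quick sketch with those values (hypothetical local run under plain Lua, same caveats as above):

    obj = { spec = {}, status = { active = 0, desired = 1, succeeded = 0, failed = 1, phase = "failed" } }
    local hs = dofile("health.lua")  -- the BroadcastJob health.lua above
    print(hs.status, hs.message)     -- expected: Degraded / "BroadcastJob failed"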
33  resource_customizations/apps.kruise.io/CloneSet/health.lua  Normal file
@@ -0,0 +1,33 @@
hs = { status = "Progressing", message = "Waiting for initialization" }

if obj.status ~= nil then

  if obj.metadata.generation == obj.status.observedGeneration then

    if obj.spec.updateStrategy.paused == true or not obj.status.updatedAvailableReplicas then
      hs.status = "Suspended"
      hs.message = "Cloneset is paused"
      return hs
    elseif obj.spec.updateStrategy.partition ~= 0 and obj.metadata.generation > 1 then
      if obj.status.updatedReplicas >= obj.status.expectedUpdatedReplicas then
        hs.status = "Suspended"
        hs.message = "Cloneset needs manual intervention"
        return hs
      end

    elseif obj.status.updatedAvailableReplicas == obj.status.replicas then
      hs.status = "Healthy"
      hs.message = "All Cloneset workloads are ready and updated"
      return hs

    else
      if obj.status.updatedAvailableReplicas ~= obj.status.replicas then
        hs.status = "Degraded"
        hs.message = "Some replicas are not ready or available"
        return hs
      end
    end
  end
end

return hs

@@ -0,0 +1,21 @@
tests:
- healthStatus:
    status: Healthy
    message: "All Cloneset workloads are ready and updated"
  inputPath: testdata/healthy.yaml
- healthStatus:
    status: Degraded
    message: "Some replicas are not ready or available"
  inputPath: testdata/degraded.yaml
- healthStatus:
    status: Progressing
    message: "Waiting for initialization"
  inputPath: testdata/unknown.yaml
- healthStatus:
    status: Suspended
    message: "Cloneset is paused"
  inputPath: testdata/suspended.yaml
- healthStatus:
    status: Suspended
    message: "Cloneset needs manual intervention"
  inputPath: testdata/partition_suspended.yaml

35  resource_customizations/apps.kruise.io/CloneSet/testdata/degraded.yaml  vendored  Normal file
@@ -0,0 +1,35 @@
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  name: cloneset-test
  namespace: kruise
  generation: 1
  labels:
    app: sample
spec:
  replicas: 2
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    paused: false

status:
  observedGeneration: 1
  replicas: 2
  updatedReadyReplicas: 1
  updatedAvailableReplicas: 1
  conditions:
    - lastTransitionTime: "2021-09-21T22:35:31Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: FailedScale

36  resource_customizations/apps.kruise.io/CloneSet/testdata/healthy.yaml  vendored  Normal file
@@ -0,0 +1,36 @@
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  name: cloneset-test
  namespace: kruise
  generation: 1
  labels:
    app: sample
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    paused: false

status:
  observedGeneration: 1
  replicas: 2
  updatedReadyReplicas: 2
  updatedAvailableReplicas: 2
  conditions:
    - lastTransitionTime: "2021-09-21T22:35:31Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: FailedScale

31  resource_customizations/apps.kruise.io/CloneSet/testdata/partition_suspended.yaml  vendored  Normal file
@@ -0,0 +1,31 @@
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  name: cloneset-test
  namespace: kruise
  generation: 2
  labels:
    app: sample
spec:
  replicas: 5
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    partition: 3

status:
  observedGeneration: 2
  replicas: 5
  expectedUpdatedReplicas: 2
  updatedReadyReplicas: 1
  updatedAvailableReplicas: 1
  updatedReplicas: 3

35  resource_customizations/apps.kruise.io/CloneSet/testdata/suspended.yaml  vendored  Normal file
@@ -0,0 +1,35 @@
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  name: cloneset-test
  namespace: kruise
  generation: 2
  labels:
    app: sample
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    paused: true

status:
  observedGeneration: 2
  replicas: 2
  updatedReadyReplicas: 2
  updatedAvailableReplicas: 2
  conditions:
    - lastTransitionTime: "2021-09-21T22:35:31Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: FailedScale

5  resource_customizations/apps.kruise.io/CloneSet/testdata/unknown.yaml  vendored  Normal file
@@ -0,0 +1,5 @@
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  name: cloneset-test
  namespace: kruise
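
One subtlety in the CloneSet script: the paused branch is `obj.spec.updateStrategy.paused == true or not obj.status.updatedAvailableReplicas`, and in Lua `not nil` evaluates to true, so a CloneSet whose status does not yet report updatedAvailableReplicas is classified as Suspended ("Cloneset is paused") rather than Progressing. A two-line illustration (plain Lua, hypothetical values):

    local status = { replicas = 2 }             -- updatedAvailableReplicas not reported yet
    print(not status.updatedAvailableReplicas)  -- true, so the script would return Suspended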
35  resource_customizations/apps.kruise.io/DaemonSet/health.lua  Normal file
@@ -0,0 +1,35 @@
hs = { status = "Progressing", message = "Waiting for initialization" }

if obj.status ~= nil then

  if obj.metadata.generation == obj.status.observedGeneration then

    if obj.spec.updateStrategy.rollingUpdate.paused == true or not obj.status.updatedNumberScheduled then
      hs.status = "Suspended"
      hs.message = "Daemonset is paused"
      return hs
    elseif obj.spec.updateStrategy.rollingUpdate.partition ~= 0 and obj.metadata.generation > 1 then
      if obj.status.updatedNumberScheduled > (obj.status.desiredNumberScheduled - obj.spec.updateStrategy.rollingUpdate.partition) then
        hs.status = "Suspended"
        hs.message = "Daemonset needs manual intervention"
        return hs
      end

    elseif (obj.status.updatedNumberScheduled == obj.status.desiredNumberScheduled) and (obj.status.numberAvailable == obj.status.desiredNumberScheduled) then
      hs.status = "Healthy"
      hs.message = "All Daemonset workloads are ready and updated"
      return hs

    else
      if (obj.status.updatedNumberScheduled == obj.status.desiredNumberScheduled) and (obj.status.numberUnavailable == obj.status.desiredNumberScheduled) then
        hs.status = "Degraded"
        hs.message = "Some pods are not ready or available"
        return hs
      end
    end

  end

end

return hs

@@ -0,0 +1,21 @@
tests:
- healthStatus:
    status: Healthy
    message: "All Daemonset workloads are ready and updated"
  inputPath: testdata/healthy.yaml
- healthStatus:
    status: Degraded
    message: "Some pods are not ready or available"
  inputPath: testdata/degraded.yaml
- healthStatus:
    status: Progressing
    message: "Waiting for initialization"
  inputPath: testdata/unknown.yaml
- healthStatus:
    status: Suspended
    message: "Daemonset is paused"
  inputPath: testdata/suspended.yaml
- healthStatus:
    status: Suspended
    message: "Daemonset needs manual intervention"
  inputPath: testdata/partition_suspended.yaml

34  resource_customizations/apps.kruise.io/DaemonSet/testdata/degraded.yaml  vendored  Normal file
@@ -0,0 +1,34 @@
apiVersion: apps.kruise.io/v1alpha1
kind: DaemonSet
metadata:
  name: daemonset-test
  namespace: kruise
  generation: 1
  labels:
    app: sample
spec:
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      partition: 0
      paused: false

status:
  currentNumberScheduled: 1
  daemonSetHash: 5dffcdfcd7
  desiredNumberScheduled: 1
  numberUnavailable: 1
  numberMisscheduled: 0
  numberReady: 0
  observedGeneration: 1
  updatedNumberScheduled: 1

34  resource_customizations/apps.kruise.io/DaemonSet/testdata/healthy.yaml  vendored  Normal file
@@ -0,0 +1,34 @@
apiVersion: apps.kruise.io/v1alpha1
kind: DaemonSet
metadata:
  name: daemonset-test
  namespace: kruise
  generation: 1
  labels:
    app: sample
spec:
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      partition: 0
      paused: false

status:
  currentNumberScheduled: 1
  daemonSetHash: 5dffcdfcd7
  desiredNumberScheduled: 1
  numberAvailable: 1
  numberMisscheduled: 0
  numberReady: 1
  observedGeneration: 1
  updatedNumberScheduled: 1

33  resource_customizations/apps.kruise.io/DaemonSet/testdata/partition_suspended.yaml  vendored  Normal file
@@ -0,0 +1,33 @@
apiVersion: apps.kruise.io/v1alpha1
kind: DaemonSet
metadata:
  name: daemonset-test
  namespace: kruise
  generation: 6
  labels:
    app: sample
spec:
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      partition: 4

status:
  currentNumberScheduled: 1
  daemonSetHash: 5f8cdcdc65
  desiredNumberScheduled: 10
  numberAvailable: 10
  numberMisscheduled: 0
  numberReady: 10
  observedGeneration: 6
  updatedNumberScheduled: 7

33  resource_customizations/apps.kruise.io/DaemonSet/testdata/suspended.yaml  vendored  Normal file
@@ -0,0 +1,33 @@
apiVersion: apps.kruise.io/v1alpha1
kind: DaemonSet
metadata:
  name: daemonset-test
  namespace: kruise
  generation: 1
  labels:
    app: sample
spec:
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      paused: true

status:
  currentNumberScheduled: 1
  daemonSetHash: 5dffcdfcd7
  desiredNumberScheduled: 1
  numberAvailable: 1
  numberMisscheduled: 0
  numberReady: 1
  observedGeneration: 1
  updatedNumberScheduled: 1

5  resource_customizations/apps.kruise.io/DaemonSet/testdata/unknown.yaml  vendored  Normal file
@@ -0,0 +1,5 @@
apiVersion: apps.kruise.io/v1alpha1
kind: DaemonSet
metadata:
  name: daemonset-test
  namespace: kruise
@@ -0,0 +1,35 @@
hs = { status = "Progressing", message = "Waiting for initialization" }

if obj.status ~= nil then

  if obj.metadata.generation == obj.status.observedGeneration then

    if obj.spec.updateStrategy.rollingUpdate.paused == true or not obj.status.updatedAvailableReplicas then
      hs.status = "Suspended"
      hs.message = "Statefulset is paused"
      return hs
    elseif obj.spec.updateStrategy.rollingUpdate.partition ~= 0 and obj.metadata.generation > 1 then
      if obj.status.updatedReplicas > (obj.status.replicas - obj.spec.updateStrategy.rollingUpdate.partition) then
        hs.status = "Suspended"
        hs.message = "Statefulset needs manual intervention"
        return hs
      end

    elseif obj.status.updatedAvailableReplicas == obj.status.replicas then
      hs.status = "Healthy"
      hs.message = "All Statefulset workloads are ready and updated"
      return hs

    else
      if obj.status.updatedAvailableReplicas ~= obj.status.replicas then
        hs.status = "Degraded"
        hs.message = "Some replicas are not ready or available"
        return hs
      end
    end

  end

end

return hs

@@ -0,0 +1,21 @@
tests:
- healthStatus:
    status: Healthy
    message: "All Statefulset workloads are ready and updated"
  inputPath: testdata/healthy.yaml
- healthStatus:
    status: Degraded
    message: "Some replicas are not ready or available"
  inputPath: testdata/degraded.yaml
- healthStatus:
    status: Progressing
    message: "Waiting for initialization"
  inputPath: testdata/unknown.yaml
- healthStatus:
    status: Suspended
    message: "Statefulset is paused"
  inputPath: testdata/suspended.yaml
- healthStatus:
    status: Suspended
    message: "Statefulset needs manual intervention"
  inputPath: testdata/partition_suspended.yaml

42  resource_customizations/apps.kruise.io/StatefulSet/testdata/degraded.yaml  vendored  Normal file
@@ -0,0 +1,42 @@
apiVersion: apps.kruise.io/v1beta1
kind: StatefulSet
metadata:
  name: statefulset-test
  namespace: kruise
  generation: 5
  labels:
    app: sample
spec:
  replicas: 2
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
      minReadySeconds: 0
      paused: false
      partition: 0
      podUpdatePolicy: ReCreate
    type: RollingUpdate

status:
  observedGeneration: 5
  replicas: 2
  updatedAvailableReplicas: 1
  updatedReadyReplicas: 1
  conditions:
    - lastTransitionTime: "2021-09-21T22:35:31Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: FailedCreatePod

41  resource_customizations/apps.kruise.io/StatefulSet/testdata/healthy.yaml  vendored  Normal file
@@ -0,0 +1,41 @@
apiVersion: apps.kruise.io/v1beta1
kind: StatefulSet
metadata:
  name: statefulset-test
  namespace: kruise
  generation: 2
  labels:
    app: sample
spec:
  replicas: 2
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
      minReadySeconds: 0
      paused: false
      partition: 0
      podUpdatePolicy: ReCreate
    type: RollingUpdate

status:
  observedGeneration: 2
  replicas: 2
  updatedAvailableReplicas: 2
  updatedReadyReplicas: 2
  conditions:
    - lastTransitionTime: "2021-09-21T22:35:31Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'False'
      type: FailedCreatePod

36  resource_customizations/apps.kruise.io/StatefulSet/testdata/partition_suspended.yaml  vendored  Normal file
@@ -0,0 +1,36 @@
apiVersion: apps.kruise.io/v1beta1
kind: StatefulSet
metadata:
  name: statefulset-test
  namespace: kruise
  generation: 3
  labels:
    app: sample
spec:
  replicas: 10
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - image: nginx:mainline
  updateStrategy:
    rollingUpdate:
      partition: 4

status:
  availableReplicas: 10
  currentReplicas: 4
  currentRevision: statefulset-test-d4d4fb5bd
  labelSelector: app=sample
  observedGeneration: 3
  readyReplicas: 10
  replicas: 10
  updateRevision: statefulset-test-56dfb978d4
  updatedAvailableReplicas: 7
  updatedReadyReplicas: 7
  updatedReplicas: 7

36  resource_customizations/apps.kruise.io/StatefulSet/testdata/suspended.yaml  vendored  Normal file
@@ -0,0 +1,36 @@
apiVersion: apps.kruise.io/v1beta1
kind: StatefulSet
metadata:
  name: statefulset-test
  namespace: kruise
  generation: 2
  labels:
    app: sample
spec:
  replicas: 2
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
  updateStrategy:
    rollingUpdate:
      paused: true

status:
  observedGeneration: 2
  replicas: 2
  updatedAvailableReplicas: 2
  updatedReadyReplicas: 2
  conditions:
    - lastTransitionTime: "2021-09-21T22:35:31Z"
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'False'
      type: FailedCreatePod

5  resource_customizations/apps.kruise.io/StatefulSet/testdata/unknown.yaml  vendored  Normal file
@@ -0,0 +1,5 @@
apiVersion: apps.kruise.io/v1beta1
kind: StatefulSet
metadata:
  name: statefulset-test
  namespace: kruise
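
The StatefulSet partition branch compares how many replicas have already been updated against how many the partition allows to be updated; with the numbers in testdata/partition_suspended.yaml the rollout is intentionally held back, so the check reports Suspended. The arithmetic, spelled out in plain Lua with the values copied from that file:

    local updatedReplicas, replicas, partition = 7, 10, 4
    print(updatedReplicas > (replicas - partition))  -- 7 > 6 is true, so: Suspended / "Statefulset needs manual intervention"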
@@ -0,0 +1,31 @@
hs = { status = "Progressing", message = "Rollout is still progressing" }

if obj.metadata.generation == obj.status.observedGeneration then

  if obj.status.canaryStatus.currentStepState == "StepUpgrade" and obj.status.phase == "Progressing" then
    hs.status = "Progressing"
    hs.message = "Rollout is still progressing"
    return hs
  end

  if obj.status.canaryStatus.currentStepState == "StepPaused" and obj.status.phase == "Progressing" then
    hs.status = "Suspended"
    hs.message = "Rollout is Paused need manual intervention"
    return hs
  end

  if obj.status.canaryStatus.currentStepState == "Completed" and obj.status.phase == "Healthy" then
    hs.status = "Healthy"
    hs.message = "Rollout is Completed"
    return hs
  end

  if obj.status.canaryStatus.currentStepState == "StepPaused" and (obj.status.phase == "Terminating" or obj.status.phase == "Disabled") then
    hs.status = "Degraded"
    hs.message = "Rollout is Disabled or Terminating"
    return hs
  end

end

return hs

@@ -0,0 +1,17 @@
tests:
- healthStatus:
    status: Healthy
    message: "Rollout is Completed"
  inputPath: testdata/healthy.yaml
- healthStatus:
    status: Degraded
    message: "Rollout is Disabled or Terminating"
  inputPath: testdata/degraded.yaml
- healthStatus:
    status: Progressing
    message: "Rollout is still progressing"
  inputPath: testdata/progressing.yaml
- healthStatus:
    status: Suspended
    message: "Rollout is Paused need manual intervention"
  inputPath: testdata/suspended.yaml

50  resource_customizations/rollouts.kruise.io/Rollout/testdata/degraded.yaml  vendored  Normal file
@@ -0,0 +1,50 @@
apiVersion: rollouts.kruise.io/v1alpha1
kind: Rollout
metadata:
  name: rollouts-demo
  namespace: default
  annotations:
    rollouts.kruise.io/rolling-style: partition
  generation: 5
spec:
  objectRef:
    workloadRef:
      apiVersion: apps/v1
      kind: Deployment
      name: workload-demo
  strategy:
    canary:
      steps:
        - replicas: 1
          pause:
            duration: 0
        - replicas: 50%
          pause:
            duration: 0
        - replicas: 100%

status:
  canaryStatus:
    canaryReadyReplicas: 1
    canaryReplicas: 1
    canaryRevision: 76fd76f75b
    currentStepIndex: 1
    currentStepState: StepPaused
    lastUpdateTime: '2023-09-23T11:44:39Z'
    message: BatchRelease is at state Ready, rollout-id , step 1
    observedWorkloadGeneration: 7
    podTemplateHash: 76fd76f75b
    rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw
    stableRevision: 6bfdfb5bfb
  conditions:
    - lastTransitionTime: '2023-09-23T11:44:09Z'
      lastUpdateTime: '2023-09-23T11:44:09Z'
      message: Rollout is in Progressing
      reason: InRolling
      status: 'True'
      type: Progressing
  message: >-
    Rollout is in step(1/3), and you need manually confirm to enter the next
    step
  observedGeneration: 5
  phase: Disabled

56  resource_customizations/rollouts.kruise.io/Rollout/testdata/healthy.yaml  vendored  Normal file
@@ -0,0 +1,56 @@
apiVersion: rollouts.kruise.io/v1alpha1
kind: Rollout
metadata:
  name: rollouts-demo
  namespace: default
  annotations:
    rollouts.kruise.io/rolling-style: partition
  generation: 7
spec:
  objectRef:
    workloadRef:
      apiVersion: apps/v1
      kind: Deployment
      name: workload-demo
  strategy:
    canary:
      steps:
        - replicas: 1
          pause:
            duration: 0
        - replicas: 50%
          pause:
            duration: 0
        - replicas: 100%

status:
  canaryStatus:
    canaryReadyReplicas: 10
    canaryReplicas: 10
    canaryRevision: 76fd76f75b
    currentStepIndex: 3
    currentStepState: Completed
    lastUpdateTime: '2023-09-23T11:48:58Z'
    message: BatchRelease is at state Ready, rollout-id , step 3
    observedWorkloadGeneration: 22
    podTemplateHash: 76fd76f75b
    rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw
    stableRevision: 6bfdfb5bfb
  conditions:
    - lastTransitionTime: '2023-09-23T11:44:09Z'
      lastUpdateTime: '2023-09-23T11:44:09Z'
      message: Rollout progressing has been completed
      reason: Completed
      status: 'False'
      type: Progressing
    - lastTransitionTime: '2023-09-23T11:49:01Z'
      lastUpdateTime: '2023-09-23T11:49:01Z'
      message: ''
      reason: ''
      status: 'True'
      type: Succeeded
  message: Rollout progressing has been completed
  observedGeneration: 7
  phase: Healthy

48  resource_customizations/rollouts.kruise.io/Rollout/testdata/progressing.yaml  vendored  Normal file
@@ -0,0 +1,48 @@
apiVersion: rollouts.kruise.io/v1alpha1
kind: Rollout
metadata:
  name: rollouts-demo
  namespace: default
  annotations:
    rollouts.kruise.io/rolling-style: partition
  generation: 5
spec:
  objectRef:
    workloadRef:
      apiVersion: apps/v1
      kind: Deployment
      name: workload-demo
  strategy:
    canary:
      steps:
        - replicas: 1
          pause:
            duration: 0
        - replicas: 50%
          pause:
            duration: 0
        - replicas: 100%

status:
  canaryStatus:
    canaryReadyReplicas: 0
    canaryReplicas: 1
    canaryRevision: 76fd76f75b
    currentStepIndex: 1
    currentStepState: StepUpgrade
    lastUpdateTime: '2023-09-23T11:44:12Z'
    message: BatchRelease is at state Verifying, rollout-id , step 1
    observedWorkloadGeneration: 6
    podTemplateHash: 76fd76f75b
    rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw
    stableRevision: 6bfdfb5bfb
  conditions:
    - lastTransitionTime: '2023-09-23T11:44:09Z'
      lastUpdateTime: '2023-09-23T11:44:09Z'
      message: Rollout is in Progressing
      reason: InRolling
      status: 'True'
      type: Progressing
  message: Rollout is in step(1/3), and upgrade workload to new version
  observedGeneration: 5
  phase: Progressing

50  resource_customizations/rollouts.kruise.io/Rollout/testdata/suspended.yaml  vendored  Normal file
@@ -0,0 +1,50 @@
apiVersion: rollouts.kruise.io/v1alpha1
kind: Rollout
metadata:
  name: rollouts-demo
  namespace: default
  annotations:
    rollouts.kruise.io/rolling-style: partition
  generation: 5
spec:
  objectRef:
    workloadRef:
      apiVersion: apps/v1
      kind: Deployment
      name: workload-demo
  strategy:
    canary:
      steps:
        - replicas: 1
          pause:
            duration: 0
        - replicas: 50%
          pause:
            duration: 0
        - replicas: 100%

status:
  canaryStatus:
    canaryReadyReplicas: 1
    canaryReplicas: 1
    canaryRevision: 76fd76f75b
    currentStepIndex: 1
    currentStepState: StepPaused
    lastUpdateTime: '2023-09-23T11:44:39Z'
    message: BatchRelease is at state Ready, rollout-id , step 1
    observedWorkloadGeneration: 7
    podTemplateHash: 76fd76f75b
    rolloutHash: 77cxd69w47b7bwddwv2w7vxvb4xxdbwcx9x289vw69w788w4w6z4x8dd4vbz2zbw
    stableRevision: 6bfdfb5bfb
  conditions:
    - lastTransitionTime: '2023-09-23T11:44:09Z'
      lastUpdateTime: '2023-09-23T11:44:09Z'
      message: Rollout is in Progressing
      reason: InRolling
      status: 'True'
      type: Progressing
  message: >-
    Rollout is in step(1/3), and you need manually confirm to enter the next
    step
  observedGeneration: 5
  phase: Progressing
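
The Rollout script keys off the pair (canaryStatus.currentStepState, phase): StepPaused while Progressing maps to Suspended, while the same step state under Terminating or Disabled maps to Degraded. A minimal sketch against the suspended.yaml shape (hypothetical local run under plain Lua, same caveats as the earlier harness):

    obj = {
      metadata = { generation = 5 },
      status = { observedGeneration = 5, phase = "Progressing", canaryStatus = { currentStepState = "StepPaused" } },
    }
    local hs = dofile("health.lua")  -- the Rollout health.lua above
    print(hs.status, hs.message)     -- expected: Suspended / "Rollout is Paused need manual intervention"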
@@ -197,6 +197,7 @@ type ArgoCDServer struct {

type ArgoCDServerOpts struct {
    DisableAuth     bool
    ContentTypes    []string
    EnableGZip      bool
    Insecure        bool
    StaticAssetsDir string
@@ -989,6 +990,9 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWebHandl
    if a.EnableGZip {
        handler = compressHandler(handler)
    }
    if len(a.ContentTypes) > 0 {
        handler = enforceContentTypes(handler, a.ContentTypes)
    }
    mux.Handle("/api/", handler)

    terminal := application.NewHandler(a.appLister, a.Namespace, a.ApplicationNamespaces, a.db, a.enf, a.Cache, appResourceTreeFn, a.settings.ExecShells, *a.sessionMgr).
@@ -1055,6 +1059,20 @@ func (a *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWebHandl
    return &httpS
}

func enforceContentTypes(handler http.Handler, types []string) http.Handler {
    allowedTypes := map[string]bool{}
    for _, t := range types {
        allowedTypes[strings.ToLower(t)] = true
    }
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.Method == http.MethodGet || allowedTypes[strings.ToLower(r.Header.Get("Content-Type"))] {
            handler.ServeHTTP(w, r)
        } else {
            http.Error(w, "Invalid content type", http.StatusUnsupportedMediaType)
        }
    })
}

// registerExtensions will try to register all configured extensions
// in the given mux. If any error is returned while registering
// extensions handlers, no route will be added in the given mux.

@@ -38,7 +38,7 @@ https://kubernetes.default.svc in-cluster %v Successful `, GetVe
        When().
        CreateApp()

    tries := 2
    tries := 5
    for i := 0; i <= tries; i += 1 {
        clusterFixture.GivenWithSameState(t).
            When().

@@ -28,6 +28,7 @@ func DoHttpRequest(method string, path string, data ...byte) (*http.Response, er
        return nil, err
    }
    req.AddCookie(&http.Cookie{Name: common.AuthCookieName, Value: token})
    req.Header.Set("Content-Type", "application/json")

    httpClient := &http.Client{
        Transport: &http.Transport{

@@ -149,9 +149,9 @@ export const PodsLogsViewer = (props: PodLogsProps) => {
    const logsContent = (width: number, height: number, isWrapped: boolean) => (
        <div ref={logsContainerRef} onScroll={handleScroll} style={{width, height, overflow: 'scroll'}}>
            {logs.map((log, lineNum) => (
                <pre key={lineNum} style={{whiteSpace: isWrapped ? 'normal' : 'pre'}} className='noscroll'>
                <div key={lineNum} style={{whiteSpace: isWrapped ? 'normal' : 'pre', lineHeight: '16px'}} className='noscroll'>
                    <Ansi>{renderLog(log, lineNum)}</Ansi>
                </pre>
                </div>
            ))}
        </div>
    );