Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-03-19 14:58:51 +01:00)

Compare commits

15 Commits:

- fcf5d8c238
- 1ee3c80bc8
- a79fcad0e9
- 67e57a47a2
- d99ee9d28b
- 28a9225e7b
- f5d6b2972b
- 06e2e0da9a
- a79e0eaca4
- 65461a1b61
- 2268f08819
- a1a5c58a7d
- 9c379af169
- 4e01115a48
- eddf0a5f30
.github/workflows/release.yaml (8 lines changed, vendored)
@@ -87,6 +87,14 @@ jobs:
           echo "KUBECTL_VERSION=$(go list -m k8s.io/client-go | head -n 1 | rev | cut -d' ' -f1 | rev)" >> $GITHUB_ENV
           echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
+        with:
+          large-packages: false
+          docker-images: false
+          swap-storage: false
+          tool-cache: false
+
       - name: Run GoReleaser
         uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
         id: run-goreleaser
USERS.md (1 line changed)
@@ -40,6 +40,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [Boozt](https://www.booztgroup.com/)
 1. [Boticario](https://www.boticario.com.br/)
 1. [Bulder Bank](https://bulderbank.no)
 1. [CAM](https://cam-inc.co.jp)
+1. [Camptocamp](https://camptocamp.com)
 1. [Candis](https://www.candis.io)
 1. [Capital One](https://www.capitalone.com)
@@ -24,15 +24,12 @@ import (
 	cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
 	appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
 	"github.com/argoproj/argo-cd/v2/util/cli"
-	"github.com/argoproj/argo-cd/v2/util/db"
 	"github.com/argoproj/argo-cd/v2/util/env"
 	"github.com/argoproj/argo-cd/v2/util/errors"
 	kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
 	"github.com/argoproj/argo-cd/v2/util/settings"
 	"github.com/argoproj/argo-cd/v2/util/tls"
 	"github.com/argoproj/argo-cd/v2/util/trace"
-	kubeerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 const (
@@ -147,7 +144,8 @@ func NewCommand() *cobra.Command {
 				appController.InvalidateProjectsCache()
 			}))
 			kubectl := kubeutil.NewKubectl()
-			clusterSharding := getClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+			clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
+			errors.CheckError(err)
 			appController, err = controller.NewApplicationController(
 				namespace,
 				settingsMgr,
@@ -170,6 +168,7 @@ func NewCommand() *cobra.Command {
 				applicationNamespaces,
 				&workqueueRateLimit,
 				serverSideDiff,
+				enableDynamicClusterDistribution,
 			)
 			errors.CheckError(err)
 			cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -235,56 +234,3 @@ func NewCommand() *cobra.Command {
 	})
 	return &command
 }
-
-func getClusterSharding(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterShardingCache {
-	var replicasCount int
-	// StatefulSet mode and Deployment mode uses different default values for shard number.
-	defaultShardNumberValue := 0
-	applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
-	appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
-
-	// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
-	if err != nil && kubeerrors.IsNotFound(err) {
-		appControllerDeployment = nil
-	}
-
-	if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
-		replicasCount = int(*appControllerDeployment.Spec.Replicas)
-		defaultShardNumberValue = -1
-	} else {
-		replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
-	}
-	shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, defaultShardNumberValue, -math.MaxInt32, math.MaxInt32)
-	if replicasCount > 1 {
-		// check for shard mapping using configmap if application-controller is a deployment
-		// else use existing logic to infer shard from pod name if application-controller is a statefulset
-		if enableDynamicClusterDistribution && appControllerDeployment != nil {
-			var err error
-			// retry 3 times if we find a conflict while updating shard mapping configMap.
-			// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
-			for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
-				shardNumber, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
-				if err != nil && !kubeerrors.IsConflict(err) {
-					err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
-					break
-				}
-				log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
-			}
-			errors.CheckError(err)
-		} else {
-			if shardNumber < 0 {
-				var err error
-				shardNumber, err = sharding.InferShard()
-				errors.CheckError(err)
-			}
-			if shardNumber > replicasCount {
-				log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
-				shardNumber = 0
-			}
-		}
-	} else {
-		log.Info("Processing all cluster shards")
-	}
-	db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
-	return sharding.NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm)
-}
@@ -242,7 +242,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
 	command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
 	command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
-	command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json"), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
+	command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", env.StringFromEnvOpts{AllowEmpty: true}), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
 	command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
 	command.AddCommand(cli.NewVersionCmd(cliName))
 	command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
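The point of this change: previously a set-but-empty `ARGOCD_API_CONTENT_TYPES` fell back to the default, so the flag's "any content type is allowed if empty" escape hatch was unreachable through the environment. A toy sketch of the lookup semantics the new `env.StringFromEnvOpts{AllowEmpty: true}` option suggests (the helper below is a hypothetical stand-in, not the argo-cd implementation):

```go
package main

import (
	"fmt"
	"os"
)

// stringFromEnv is a hypothetical stand-in: with allowEmpty, a variable that
// is set but empty wins over the default; without it, empty falls back.
func stringFromEnv(key, def string, allowEmpty bool) string {
	v, ok := os.LookupEnv(key)
	if !ok || (v == "" && !allowEmpty) {
		return def
	}
	return v
}

func main() {
	os.Setenv("ARGOCD_API_CONTENT_TYPES", "")
	fmt.Printf("%q\n", stringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", false)) // "application/json"
	fmt.Printf("%q\n", stringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", true))  // "" (content-type check disabled)
}
```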
@@ -1116,6 +1116,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 			defer argoio.Close(conn)
 			cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
 			errors.CheckError(err)

 			diffOption.local = local
 			diffOption.localRepoRoot = localRepoRoot
+			diffOption.cluster = cluster
@@ -114,7 +114,6 @@ type ApplicationController struct {
 	appInformer          cache.SharedIndexInformer
 	appLister            applisters.ApplicationLister
 	projInformer         cache.SharedIndexInformer
-	deploymentInformer   informerv1.DeploymentInformer
 	appStateManager      AppStateManager
 	stateCache           statecache.LiveStateCache
 	statusRefreshTimeout time.Duration
@@ -131,6 +130,10 @@ type ApplicationController struct {
 	clusterSharding       sharding.ClusterShardingCache
 	projByNameCache       sync.Map
 	applicationNamespaces []string
+
+	// dynamicClusterDistributionEnabled if disabled deploymentInformer is never initialized
+	dynamicClusterDistributionEnabled bool
+	deploymentInformer                informerv1.DeploymentInformer
 }

 // NewApplicationController creates new instance of ApplicationController.
@@ -156,6 +159,7 @@ func NewApplicationController(
 	applicationNamespaces []string,
 	rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
 	serverSideDiff bool,
+	dynamicClusterDistributionEnabled bool,
 ) (*ApplicationController, error) {
 	log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
 	db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -164,28 +168,29 @@ func NewApplicationController(
 		log.Info("Using default workqueue rate limiter config")
 	}
 	ctrl := ApplicationController{
-		cache:                         argoCache,
-		namespace:                     namespace,
-		kubeClientset:                 kubeClientset,
-		kubectl:                       kubectl,
-		applicationClientset:          applicationClientset,
-		repoClientset:                 repoClientset,
-		appRefreshQueue:               workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
-		appOperationQueue:             workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
-		projectRefreshQueue:           workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
-		appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
-		db:                            db,
-		statusRefreshTimeout:          appResyncPeriod,
-		statusHardRefreshTimeout:      appHardResyncPeriod,
-		statusRefreshJitter:           appResyncJitter,
-		refreshRequestedApps:          make(map[string]CompareWith),
-		refreshRequestedAppsMutex:     &sync.Mutex{},
-		auditLogger:                   argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
-		settingsMgr:                   settingsMgr,
-		selfHealTimeout:               selfHealTimeout,
-		clusterSharding:               clusterSharding,
-		projByNameCache:               sync.Map{},
-		applicationNamespaces:         applicationNamespaces,
+		cache:                             argoCache,
+		namespace:                         namespace,
+		kubeClientset:                     kubeClientset,
+		kubectl:                           kubectl,
+		applicationClientset:              applicationClientset,
+		repoClientset:                     repoClientset,
+		appRefreshQueue:                   workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
+		appOperationQueue:                 workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
+		projectRefreshQueue:               workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
+		appComparisonTypeRefreshQueue:     workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
+		db:                                db,
+		statusRefreshTimeout:              appResyncPeriod,
+		statusHardRefreshTimeout:          appHardResyncPeriod,
+		statusRefreshJitter:               appResyncJitter,
+		refreshRequestedApps:              make(map[string]CompareWith),
+		refreshRequestedAppsMutex:         &sync.Mutex{},
+		auditLogger:                       argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
+		settingsMgr:                       settingsMgr,
+		selfHealTimeout:                   selfHealTimeout,
+		clusterSharding:                   clusterSharding,
+		projByNameCache:                   sync.Map{},
+		applicationNamespaces:             applicationNamespaces,
+		dynamicClusterDistributionEnabled: dynamicClusterDistributionEnabled,
 	}
 	if kubectlParallelismLimit > 0 {
 		ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
@@ -228,25 +233,33 @@ func NewApplicationController(
 	}

 	factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
-	deploymentInformer := factory.Apps().V1().Deployments()
+
+	var deploymentInformer informerv1.DeploymentInformer
+
+	// only initialize deployment informer if dynamic distribution is enabled
+	if dynamicClusterDistributionEnabled {
+		deploymentInformer = factory.Apps().V1().Deployments()
+	}

 	readinessHealthCheck := func(r *http.Request) error {
-		applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
-		appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
-		if err != nil {
-			if kubeerrors.IsNotFound(err) {
-				appControllerDeployment = nil
-			} else {
-				return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
-			}
-		}
-		if appControllerDeployment != nil {
-			if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
-				return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
-			}
-			shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
-			if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
-				return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
+		if dynamicClusterDistributionEnabled {
+			applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
+			appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
+			if err != nil {
+				if kubeerrors.IsNotFound(err) {
+					appControllerDeployment = nil
+				} else {
+					return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
+				}
+			}
+			if appControllerDeployment != nil {
+				if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
+					return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
+				}
+				shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
+				if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
+					return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
+				}
 			}
 		}
 		return nil
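For context, a readiness check returning a non-nil error is what flips the probe: argo-cd's healthz helper (`healthz.ServeHealthCheck`, seen in the metrics server diff below) maps the error onto an unhealthy HTTP status. A generic, self-contained sketch of that wiring (not the upstream helper):

```go
package main

import (
	"fmt"
	"net/http"
)

// serveReadiness maps a check function onto an HTTP endpoint: a nil error
// means ready (200), a non-nil error means not ready (503).
func serveReadiness(mux *http.ServeMux, check func(r *http.Request) error) {
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		if err := check(r); err != nil {
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			return
		}
		fmt.Fprintln(w, "ok")
	})
}

func main() {
	mux := http.NewServeMux()
	serveReadiness(mux, func(*http.Request) error { return nil })
	_ = http.ListenAndServe("localhost:8082", mux)
}
```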
@@ -774,7 +787,11 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int

 	go ctrl.appInformer.Run(ctx.Done())
 	go ctrl.projInformer.Run(ctx.Done())
-	go ctrl.deploymentInformer.Informer().Run(ctx.Done())
+
+	if ctrl.dynamicClusterDistributionEnabled {
+		// only start deployment informer if dynamic distribution is enabled
+		go ctrl.deploymentInformer.Informer().Run(ctx.Done())
+	}

 	clusters, err := ctrl.db.ListClusters(ctx)
 	if err != nil {
@@ -157,6 +157,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
 		nil,
 		false,
+		false,
 	)
 	db := &dbmocks.ArgoDB{}
 	db.On("GetApplicationControllerReplicas").Return(1)
controller/cache/cache.go (2 lines changed, vendored)
@@ -751,7 +751,7 @@ func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
 }

 func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *appv1.Cluster) {
-	c.clusterSharding.Update(newCluster)
+	c.clusterSharding.Update(oldCluster, newCluster)
 	c.lock.Lock()
 	cluster, ok := c.clusters[newCluster.Server]
 	c.lock.Unlock()
@@ -23,6 +23,8 @@ import (
 	"github.com/argoproj/argo-cd/v2/util/git"
 	"github.com/argoproj/argo-cd/v2/util/healthz"
 	"github.com/argoproj/argo-cd/v2/util/profile"
+
+	ctrl_metrics "sigs.k8s.io/controller-runtime/pkg/metrics"
 )

 type MetricsServer struct {
@@ -160,12 +162,12 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil

 	mux := http.NewServeMux()
 	registry := NewAppRegistry(appLister, appFilter, appLabels)
-	registry.MustRegister(depth, adds, latency, workDuration, unfinished, longestRunningProcessor, retries)

 	mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{
 		// contains app controller specific metrics
 		registry,
 		// contains process, golang and controller workqueues metrics
 		prometheus.DefaultGatherer,
+		// contains workqueue metrics, process and golang metrics
+		ctrl_metrics.Registry,
 	}, promhttp.HandlerOpts{}))
 	profile.RegisterProfiler(mux)
 	healthz.ServeHealthCheck(mux, healthCheck)
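The `prometheus.Gatherers` slice is what lets a single `/metrics` endpoint merge several registries; after this change its third member is controller-runtime's registry, which carries the workqueue metrics that the deleted local provider (the removed file below) used to register by hand. A minimal sketch of the merging pattern, assuming the same libraries:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func main() {
	appRegistry := prometheus.NewRegistry() // stand-in for NewAppRegistry(...)
	gatherers := prometheus.Gatherers{
		appRegistry,                // app-specific metrics
		prometheus.DefaultGatherer, // process and Go runtime metrics
		ctrlmetrics.Registry,       // controller-runtime registry, incl. workqueue metrics
	}
	http.Handle("/metrics", promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe("localhost:8082", nil)
}
```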
@@ -2,6 +2,7 @@ package metrics

 import (
 	"context"
+	"fmt"
 	"log"
 	"net/http"
 	"net/http/httptest"
@@ -15,12 +16,15 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
 	"sigs.k8s.io/yaml"

 	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 	appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
 	appinformer "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
 	applister "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
+
+	"sigs.k8s.io/controller-runtime/pkg/controller"
 )

 const fakeApp = `
@@ -140,6 +144,12 @@ var appFilter = func(obj interface{}) bool {
 	return true
 }

+func init() {
+	// Create a fake controller so we initialize the internal controller metrics.
+	// https://github.com/kubernetes-sigs/controller-runtime/blob/4000e996a202917ad7d40f02ed8a2079a9ce25e9/pkg/internal/controller/metrics/metrics.go
+	_, _ = controller.New("test-controller", nil, controller.Options{})
+}
+
 func newFakeApp(fakeAppYAML string) *argoappv1.Application {
 	var app argoappv1.Application
 	err := yaml.Unmarshal([]byte(fakeAppYAML), &app)
@@ -360,7 +370,7 @@ func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
 		if line == "" {
 			continue
 		}
-		assert.Contains(t, body, line, "expected metrics mismatch")
+		assert.Contains(t, body, line, fmt.Sprintf("expected metrics mismatch for line: %s", line))
 	}
 }
@@ -443,3 +453,70 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa
	err = metricsServ.SetExpiration(time.Second)
	assert.Error(t, err)
}

func TestWorkqueueMetrics(t *testing.T) {
	cancel, appLister := newFakeLister()
	defer cancel()
	metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
	assert.NoError(t, err)

	expectedMetrics := `
# TYPE workqueue_adds_total counter
workqueue_adds_total{name="test"}

# TYPE workqueue_depth gauge
workqueue_depth{name="test"}

# TYPE workqueue_longest_running_processor_seconds gauge
workqueue_longest_running_processor_seconds{name="test"}

# TYPE workqueue_queue_duration_seconds histogram

# TYPE workqueue_unfinished_work_seconds gauge
workqueue_unfinished_work_seconds{name="test"}

# TYPE workqueue_work_duration_seconds histogram
`
	workqueue.NewNamed("test")

	req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
	assert.NoError(t, err)
	rr := httptest.NewRecorder()
	metricsServ.Handler.ServeHTTP(rr, req)
	assert.Equal(t, rr.Code, http.StatusOK)
	body := rr.Body.String()
	log.Println(body)
	assertMetricsPrinted(t, expectedMetrics, body)
}

func TestGoMetrics(t *testing.T) {
	cancel, appLister := newFakeLister()
	defer cancel()
	metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
	assert.NoError(t, err)

	expectedMetrics := `
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds_sum
go_gc_duration_seconds_count
# TYPE go_goroutines gauge
go_goroutines
# TYPE go_info gauge
go_info
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes
# TYPE go_threads gauge
go_threads
`

	req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
	assert.NoError(t, err)
	rr := httptest.NewRecorder()
	metricsServ.Handler.ServeHTTP(rr, req)
	assert.Equal(t, rr.Code, http.StatusOK)
	body := rr.Body.String()
	log.Println(body)
	assertMetricsPrinted(t, expectedMetrics, body)
}
@@ -1,101 +0,0 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/util/workqueue"
)

const (
	WorkQueueSubsystem         = "workqueue"
	DepthKey                   = "depth"
	AddsKey                    = "adds_total"
	QueueLatencyKey            = "queue_duration_seconds"
	WorkDurationKey            = "work_duration_seconds"
	UnfinishedWorkKey          = "unfinished_work_seconds"
	LongestRunningProcessorKey = "longest_running_processor_seconds"
	RetriesKey                 = "retries_total"
)

var (
	depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      DepthKey,
		Help:      "Current depth of workqueue",
	}, []string{"name"})

	adds = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      AddsKey,
		Help:      "Total number of adds handled by workqueue",
	}, []string{"name"})

	latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      QueueLatencyKey,
		Help:      "How long in seconds an item stays in workqueue before being requested",
		Buckets:   []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180},
	}, []string{"name"})

	workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      WorkDurationKey,
		Help:      "How long in seconds processing an item from workqueue takes.",
		Buckets:   []float64{1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 30, 60, 120, 180},
	}, []string{"name"})

	unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      UnfinishedWorkKey,
		Help: "How many seconds of work has been done that " +
			"is in progress and hasn't been observed by work_duration. Large " +
			"values indicate stuck threads. One can deduce the number of stuck " +
			"threads by observing the rate at which this increases.",
	}, []string{"name"})

	longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      LongestRunningProcessorKey,
		Help: "How many seconds has the longest running " +
			"processor for workqueue been running.",
	}, []string{"name"})

	retries = prometheus.NewCounterVec(prometheus.CounterOpts{
		Subsystem: WorkQueueSubsystem,
		Name:      RetriesKey,
		Help:      "Total number of retries handled by workqueue",
	}, []string{"name"})
)

func init() {
	workqueue.SetProvider(workqueueMetricsProvider{})
}

type workqueueMetricsProvider struct{}

func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
	return depth.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
	return adds.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
	return latency.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
	return workDuration.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return unfinished.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return longestRunningProcessor.WithLabelValues(name)
}

func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
	return retries.WithLabelValues(name)
}
@@ -12,7 +12,7 @@ type ClusterShardingCache interface {
 	Init(clusters *v1alpha1.ClusterList)
 	Add(c *v1alpha1.Cluster)
 	Delete(clusterServer string)
-	Update(c *v1alpha1.Cluster)
+	Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster)
 	IsManagedCluster(c *v1alpha1.Cluster) bool
 	GetDistribution() map[string]int
 }
@@ -26,7 +26,7 @@ type ClusterSharding struct {
 	getClusterShard DistributionFunction
 }

-func NewClusterSharding(db db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
+func NewClusterSharding(_ db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
 	log.Debugf("Processing clusters from shard %d: Using filter function: %s", shard, shardingAlgorithm)
 	clusterSharding := &ClusterSharding{
 		Shard: shard,
@@ -67,7 +67,8 @@ func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList) {
 	defer sharding.lock.Unlock()
 	newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
 	for _, c := range clusters.Items {
-		newClusters[c.Server] = &c
+		cluster := c
+		newClusters[c.Server] = &cluster
 	}
 	sharding.Clusters = newClusters
 	sharding.updateDistribution()
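The `cluster := c` copy fixes a classic Go pitfall: before Go 1.22, the range variable is a single reused variable, so storing `&c` makes every map entry point at the final element. A standalone demonstration (the "bad" output assumes a pre-1.22 toolchain; Go 1.22 changed loop-variable scoping):

```go
package main

import "fmt"

func main() {
	items := []int{1, 2, 3}

	bad := map[int]*int{}
	for _, v := range items {
		bad[v] = &v // pre-Go 1.22: every entry aliases the same loop variable
	}

	good := map[int]*int{}
	for _, v := range items {
		v := v // per-iteration copy, same shape as `cluster := c` above
		good[v] = &v
	}

	fmt.Println(*bad[1], *good[1]) // pre-1.22: "3 1"; Go 1.22+: "1 1"
}
```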
@@ -96,13 +97,16 @@ func (sharding *ClusterSharding) Delete(clusterServer string) {
 	}
 }

-func (sharding *ClusterSharding) Update(c *v1alpha1.Cluster) {
+func (sharding *ClusterSharding) Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster) {
 	sharding.lock.Lock()
 	defer sharding.lock.Unlock()

-	old, ok := sharding.Clusters[c.Server]
-	sharding.Clusters[c.Server] = c
-	if !ok || hasShardingUpdates(old, c) {
+	if _, ok := sharding.Clusters[oldCluster.Server]; ok && oldCluster.Server != newCluster.Server {
+		delete(sharding.Clusters, oldCluster.Server)
+		delete(sharding.Shards, oldCluster.Server)
+	}
+	sharding.Clusters[newCluster.Server] = newCluster
+	if hasShardingUpdates(oldCluster, newCluster) {
 		sharding.updateDistribution()
 	} else {
 		log.Debugf("Skipping sharding distribution update. No relevant changes")
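The reason Update now takes both objects: when a cluster's server URL (the map key) changes, writing only the new key would leak the entry stored under the old URL. A self-contained toy of the evict-then-store pattern (hypothetical types, not the argo-cd ones):

```go
package main

import "fmt"

type shardCache struct {
	clusters map[string]string // server URL -> cluster ID
}

// update evicts the entry under the old key before storing the new one,
// mirroring the oldCluster/newCluster handling in ClusterSharding.Update.
func (c *shardCache) update(oldServer, newServer, id string) {
	if _, ok := c.clusters[oldServer]; ok && oldServer != newServer {
		delete(c.clusters, oldServer)
	}
	c.clusters[newServer] = id
}

func main() {
	c := &shardCache{clusters: map[string]string{"https://old": "1"}}
	c.update("https://old", "https://new", "1")
	fmt.Println(len(c.clusters)) // 1 entry, not a stale 2
}
```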
@@ -111,8 +115,8 @@ func (sharding *ClusterSharding) Update(c *v1alpha1.Cluster) {

 func (sharding *ClusterSharding) GetDistribution() map[string]int {
 	sharding.lock.RLock()
-	defer sharding.lock.RUnlock()
 	shards := sharding.Shards
+	sharding.lock.RUnlock()

 	distribution := make(map[string]int, len(shards))
 	for k, v := range shards {
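Replacing the deferred unlock with an early `RUnlock` shortens the critical section, since building the result map needs no lock once the data has been read. A generic sketch of the pattern, with an explicit copy taken under the read lock (iterating a shared map after unlocking is only safe if nothing mutates it concurrently):

```go
package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu     sync.RWMutex
	shards map[string]int
}

// Snapshot copies the map while holding the read lock, then releases the
// lock before the caller iterates the copy.
func (s *store) Snapshot() map[string]int {
	s.mu.RLock()
	out := make(map[string]int, len(s.shards))
	for k, v := range s.shards {
		out[k] = v
	}
	s.mu.RUnlock()
	return out
}

func main() {
	s := &store{shards: map[string]int{"https://a": 0, "https://b": 1}}
	fmt.Println(s.Snapshot())
}
```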
@@ -122,9 +126,7 @@ func (sharding *ClusterSharding) GetDistribution() map[string]int {
 }

 func (sharding *ClusterSharding) updateDistribution() {
-	log.Info("Updating cluster shards")
-
-	for _, c := range sharding.Clusters {
+	for k, c := range sharding.Clusters {
 		shard := 0
 		if c.Shard != nil {
 			requestedShard := int(*c.Shard)
@@ -136,24 +138,44 @@ func (sharding *ClusterSharding) updateDistribution() {
 		} else {
 			shard = sharding.getClusterShard(c)
 		}
-		var shard64 int64 = int64(shard)
-		c.Shard = &shard64
-		sharding.Shards[c.Server] = shard
+
+		existingShard, ok := sharding.Shards[k]
+		if ok && existingShard != shard {
+			log.Infof("Cluster %s has changed shard from %d to %d", k, existingShard, shard)
+		} else if !ok {
+			log.Infof("Cluster %s has been assigned to shard %d", k, shard)
+		} else {
+			log.Debugf("Cluster %s has not changed shard", k)
+		}
+		sharding.Shards[k] = shard
 	}
 }

-// hasShardingUpdates returns true if the sharding distribution has been updated.
-// nil checking is done for the corner case of the in-cluster cluster which may
-// have a nil shard assigned
+// hasShardingUpdates returns true if the sharding distribution has explicitly changed
 func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
-	if old == nil || new == nil || (old.Shard == nil && new.Shard == nil) {
+	if old == nil || new == nil {
 		return false
 	}
-	return old.Shard != new.Shard
+
+	// returns true if the cluster id has changed because some sharding algorithms depend on it.
+	if old.ID != new.ID {
+		return true
+	}
+
+	if old.Server != new.Server {
+		return true
+	}
+
+	// return false if the shard field has not been modified
+	if old.Shard == nil && new.Shard == nil {
+		return false
+	}
+	return old.Shard == nil || new.Shard == nil || int64(*old.Shard) != int64(*new.Shard)
 }

 func (d *ClusterSharding) GetClusterAccessor() clusterAccessor {
 	return func() []*v1alpha1.Cluster {
 		// no need to lock, as this is only called from the updateDistribution function
 		clusters := make([]*v1alpha1.Cluster, 0, len(d.Clusters))
 		for _, c := range d.Clusters {
 			clusters = append(clusters, c)
controller/sharding/cache_test.go (475 lines, new file)
@@ -0,0 +1,475 @@
package sharding

import (
	"testing"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
	"github.com/stretchr/testify/assert"
)

func setupTestSharding(shard int, replicas int) *ClusterSharding {
	shardingAlgorithm := "legacy" // we are using the legacy algorithm as it is deterministic based on the cluster id which is easier to test
	db := &dbmocks.ArgoDB{}
	return NewClusterSharding(db, shard, replicas, shardingAlgorithm).(*ClusterSharding)
}
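The comment above picks the legacy algorithm because it maps a cluster ID to a shard deterministically, which keeps the assertions below stable. A toy illustration of such an ID-based mapping (a hash modulo the replica count; not necessarily the exact upstream function):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// shardByID deterministically assigns an ID to one of `replicas` shards.
func shardByID(id string, replicas int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(id))
	return int(h.Sum32() % uint32(replicas))
}

func main() {
	for _, id := range []string{"1", "2", "3"} {
		fmt.Printf("cluster ID %s -> shard %d\n", id, shardByID(id, 2))
	}
}
```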
func TestNewClusterSharding(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	assert.NotNil(t, sharding)
	assert.Equal(t, shard, sharding.Shard)
	assert.Equal(t, replicas, sharding.Replicas)
	assert.NotNil(t, sharding.Shards)
	assert.NotNil(t, sharding.Clusters)
}

func TestClusterSharding_Add(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	clusterA := &v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
	}

	sharding.Add(clusterA)

	clusterB := v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}

	sharding.Add(&clusterB)

	distribution := sharding.GetDistribution()

	assert.Contains(t, sharding.Clusters, clusterA.Server)
	assert.Contains(t, sharding.Clusters, clusterB.Server)

	clusterDistribution, ok := distribution[clusterA.Server]
	assert.True(t, ok)
	assert.Equal(t, 1, clusterDistribution)

	myClusterDistribution, ok := distribution[clusterB.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, myClusterDistribution)

	assert.Equal(t, 2, len(distribution))
}

func TestClusterSharding_AddRoundRobin_Redistributes(t *testing.T) {
	shard := 1
	replicas := 2

	db := &dbmocks.ArgoDB{}

	sharding := NewClusterSharding(db, shard, replicas, "round-robin").(*ClusterSharding)

	clusterA := &v1alpha1.Cluster{
		ID:     "1",
		Server: "https://127.0.0.1:6443",
	}
	sharding.Add(clusterA)

	clusterB := v1alpha1.Cluster{
		ID:     "3",
		Server: "https://kubernetes.default.svc",
	}
	sharding.Add(&clusterB)

	distributionBefore := sharding.GetDistribution()

	assert.Contains(t, sharding.Clusters, clusterA.Server)
	assert.Contains(t, sharding.Clusters, clusterB.Server)

	clusterDistributionA, ok := distributionBefore[clusterA.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, clusterDistributionA)

	clusterDistributionB, ok := distributionBefore[clusterB.Server]
	assert.True(t, ok)
	assert.Equal(t, 1, clusterDistributionB)

	assert.Equal(t, 2, len(distributionBefore))

	clusterC := v1alpha1.Cluster{
		ID:     "2",
		Server: "https://1.1.1.1",
	}
	sharding.Add(&clusterC)

	distributionAfter := sharding.GetDistribution()

	assert.Contains(t, sharding.Clusters, clusterA.Server)
	assert.Contains(t, sharding.Clusters, clusterB.Server)
	assert.Contains(t, sharding.Clusters, clusterC.Server)

	clusterDistributionA, ok = distributionAfter[clusterA.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, clusterDistributionA)

	clusterDistributionC, ok := distributionAfter[clusterC.Server]
	assert.True(t, ok)
	assert.Equal(t, 1, clusterDistributionC) // will be assigned to shard 1 because the .ID is smaller then the "B" cluster

	clusterDistributionB, ok = distributionAfter[clusterB.Server]
	assert.True(t, ok)
	assert.Equal(t, 0, clusterDistributionB) // will be reassigned to shard 0 because the .ID is bigger then the "C" cluster
}

func TestClusterSharding_Delete(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	sharding.Delete("https://kubernetes.default.svc")
	distribution := sharding.GetDistribution()
	assert.Equal(t, 1, len(distribution))
}

func TestClusterSharding_Update(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	distributionBefore := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionBefore))

	distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
	assert.True(t, ok)
	assert.Equal(t, 0, distributionA)

	sharding.Update(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}, &v1alpha1.Cluster{
		ID:     "4",
		Server: "https://kubernetes.default.svc",
	})

	distributionAfter := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionAfter))

	distributionA, ok = distributionAfter["https://kubernetes.default.svc"]
	assert.True(t, ok)
	assert.Equal(t, 1, distributionA)
}

func TestClusterSharding_UpdateServerName(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	distributionBefore := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionBefore))

	distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
	assert.True(t, ok)
	assert.Equal(t, 0, distributionA)

	sharding.Update(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}, &v1alpha1.Cluster{
		ID:     "1",
		Server: "https://server2",
	})

	distributionAfter := sharding.GetDistribution()
	assert.Equal(t, 2, len(distributionAfter))

	_, ok = distributionAfter["https://kubernetes.default.svc"]
	assert.False(t, ok) // the old server name should not be present anymore

	_, ok = distributionAfter["https://server2"]
	assert.True(t, ok) // the new server name should be present
}

func TestClusterSharding_IsManagedCluster(t *testing.T) {
	replicas := 2
	sharding0 := setupTestSharding(0, replicas)

	sharding0.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
			},
		},
	)

	assert.True(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}))

	assert.False(t, sharding0.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
	}))

	sharding1 := setupTestSharding(1, replicas)

	sharding1.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				{
					ID:     "2",
					Server: "https://127.0.0.1:6443",
				},
				{
					ID:     "1",
					Server: "https://kubernetes.default.svc",
				},
			},
		},
	)

	assert.False(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
	}))

	assert.True(t, sharding1.IsManagedCluster(&v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
	}))

}

func TestClusterSharding_ClusterShardOfResourceShouldNotBeChanged(t *testing.T) {
	shard := 1
	replicas := 2
	sharding := setupTestSharding(shard, replicas)

	Int64Ptr := func(i int64) *int64 {
		return &i
	}

	clusterWithNil := &v1alpha1.Cluster{
		ID:     "2",
		Server: "https://127.0.0.1:6443",
		Shard:  nil,
	}

	clusterWithValue := &v1alpha1.Cluster{
		ID:     "1",
		Server: "https://kubernetes.default.svc",
		Shard:  Int64Ptr(1),
	}

	clusterWithToBigValue := &v1alpha1.Cluster{
		ID:     "3",
		Server: "https://1.1.1.1",
		Shard:  Int64Ptr(999), // shard value is explicitly bigger than the number of replicas
	}

	sharding.Init(
		&v1alpha1.ClusterList{
			Items: []v1alpha1.Cluster{
				*clusterWithNil,
				*clusterWithValue,
				*clusterWithToBigValue,
			},
		},
	)
	distribution := sharding.GetDistribution()
	assert.Equal(t, 3, len(distribution))

	assert.Nil(t, sharding.Clusters[clusterWithNil.Server].Shard)

	assert.NotNil(t, sharding.Clusters[clusterWithValue.Server].Shard)
	assert.Equal(t, int64(1), *sharding.Clusters[clusterWithValue.Server].Shard)
	assert.Equal(t, 1, distribution[clusterWithValue.Server])

	assert.NotNil(t, sharding.Clusters[clusterWithToBigValue.Server].Shard)
	assert.Equal(t, int64(999), *sharding.Clusters[clusterWithToBigValue.Server].Shard)
	assert.Equal(t, 0, distribution[clusterWithToBigValue.Server]) // will be assigned to shard 0 because the value is bigger than the number of replicas
}

func TestHasShardingUpdates(t *testing.T) {
	Int64Ptr := func(i int64) *int64 {
		return &i
	}

	testCases := []struct {
		name     string
		old      *v1alpha1.Cluster
		new      *v1alpha1.Cluster
		expected bool
	}{
		{
			name: "No updates",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(1),
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(1),
			},
			expected: false,
		},
		{
			name: "Updates",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(1),
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
		{
			name: "Old is nil",
			old:  nil,
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: false,
		},
		{
			name: "New is nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			new:      nil,
			expected: false,
		},
		{
			name:     "Both are nil",
			old:      nil,
			new:      nil,
			expected: false,
		},
		{
			name: "Both shards are nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			expected: false,
		},
		{
			name: "Old shard is nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
		{
			name: "New shard is nil",
			old: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			new: &v1alpha1.Cluster{
				Server: "https://kubernetes.default.svc",
				Shard:  nil,
			},
			expected: true,
		},
		{
			name: "Cluster ID has changed",
			old: &v1alpha1.Cluster{
				ID:     "1",
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			new: &v1alpha1.Cluster{
				ID:     "2",
				Server: "https://kubernetes.default.svc",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
		{
			name: "Server has changed",
			old: &v1alpha1.Cluster{
				ID:     "1",
				Server: "https://server1",
				Shard:  Int64Ptr(2),
			},
			new: &v1alpha1.Cluster{
				ID:     "1",
				Server: "https://server2",
				Shard:  Int64Ptr(2),
			},
			expected: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, hasShardingUpdates(tc.old, tc.new))
		})
	}
}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"hash/fnv"
+	"math"
 	"os"
 	"sort"
 	"strconv"
@@ -20,6 +21,7 @@ import (
 	"github.com/argoproj/argo-cd/v2/util/db"
 	"github.com/argoproj/argo-cd/v2/util/env"
+	"github.com/argoproj/argo-cd/v2/util/errors"
 	"github.com/argoproj/argo-cd/v2/util/settings"
 	log "github.com/sirupsen/logrus"
 	kubeerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -206,7 +208,7 @@ func createClusterIndexByClusterIdMap(getCluster clusterAccessor) map[string]int
 // The function takes the shard number from the environment variable (default value -1, if not set) and passes it to this function.
 // If the shard value passed to this function is -1, that is, the shard was not set as an environment variable,
 // we default the shard number to 0 for computing the default config map.
-func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
+func GetOrUpdateShardFromConfigMap(kubeClient kubernetes.Interface, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
 	hostname, err := osHostnameFunction()
 	if err != nil {
 		return -1, err
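Widening the parameter from the concrete `*kubernetes.Clientset` to the `kubernetes.Interface` abstraction is what makes the new `TestGetClusterSharding` below possible, since the fake clientset satisfies the interface. A minimal sketch of the idea:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	kubefake "k8s.io/client-go/kubernetes/fake"
)

// countDeployments accepts kubernetes.Interface, so both the real clientset
// and the fake one used in tests can be passed in.
func countDeployments(client kubernetes.Interface, ns string) (int, error) {
	list, err := client.AppsV1().Deployments(ns).List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil
}

func main() {
	client := kubefake.NewSimpleClientset() // no real cluster needed
	n, _ := countDeployments(client, "argocd")
	fmt.Println(n) // 0
}
```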
@@ -363,3 +365,59 @@ func getDefaultShardMappingData(replicas int) []shardApplicationControllerMappin
	}
	return shardMappingData
}

func GetClusterSharding(kubeClient kubernetes.Interface, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) (ClusterShardingCache, error) {
	var replicasCount int
	if enableDynamicClusterDistribution {
		applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
		appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})

		// if app controller deployment is not found when dynamic cluster distribution is enabled error out
		if err != nil {
			return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: %v", err)
		}

		if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
			replicasCount = int(*appControllerDeployment.Spec.Replicas)
		} else {
			return nil, fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment replica count")
		}

	} else {
		replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
	}
	shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
	if replicasCount > 1 {
		// check for shard mapping using configmap if application-controller is a deployment
		// else use existing logic to infer shard from pod name if application-controller is a statefulset
		if enableDynamicClusterDistribution {
			var err error
			// retry 3 times if we find a conflict while updating shard mapping configMap.
			// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
			for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
				shardNumber, err = GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
				if err != nil && !kubeerrors.IsConflict(err) {
					err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
					break
				}
				log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
			}
			errors.CheckError(err)
		} else {
			if shardNumber < 0 {
				var err error
				shardNumber, err = InferShard()
				errors.CheckError(err)
			}
			if shardNumber > replicasCount {
				log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
				shardNumber = 0
			}
		}
	} else {
		log.Info("Processing all cluster shards")
		shardNumber = 0
	}
	db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
	return NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm), nil
}
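The retry loop above tolerates optimistic-concurrency conflicts on the shard-mapping ConfigMap and gives up immediately on any other error. A self-contained toy of that retry-on-conflict-only shape (simulated errors, not the Kubernetes API):

```go
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict")

// update simulates a write that collides with another replica on the
// first two attempts and succeeds on the third.
func update(attempt int) error {
	if attempt < 2 {
		return errConflict
	}
	return nil
}

func main() {
	const retries = 3
	var err error
	for i := 0; i <= retries; i++ {
		if err = update(i); err == nil || !errors.Is(err, errConflict) {
			break // success, or a non-conflict error that retrying won't fix
		}
		fmt.Printf("conflict, retrying (%d/%d)\n", i+1, retries)
	}
	fmt.Println("final error:", err)
}
```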
@@ -1,6 +1,7 @@
 package sharding

 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -12,10 +13,14 @@ import (
 	"github.com/argoproj/argo-cd/v2/common"
 	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 	dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
+	"github.com/argoproj/argo-cd/v2/util/settings"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	kubefake "k8s.io/client-go/kubernetes/fake"
 )

 func TestGetShardByID_NotEmptyID(t *testing.T) {
@@ -681,3 +686,187 @@ func Test_getOrUpdateShardNumberForController(t *testing.T) {
		})
	}
}

func TestGetClusterSharding(t *testing.T) {
	IntPtr := func(i int32) *int32 {
		return &i
	}

	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      common.DefaultApplicationControllerName,
			Namespace: "argocd",
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: IntPtr(1),
		},
	}

	deploymentMultiReplicas := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "argocd-application-controller-multi-replicas",
			Namespace: "argocd",
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: IntPtr(3),
		},
	}

	objects := append([]runtime.Object{}, deployment, deploymentMultiReplicas)
	kubeclientset := kubefake.NewSimpleClientset(objects...)

	settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "argocd", settings.WithRepoOrClusterChangedHandler(func() {
	}))

	testCases := []struct {
		name               string
		useDynamicSharding bool
		envsSetter         func(t *testing.T)
		cleanup            func()
		expectedShard      int
		expectedReplicas   int
		expectedErr        error
	}{
		{
			name: "Default sharding with statefulset",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvControllerReplicas, "1")
			},
			cleanup:            func() {},
			useDynamicSharding: false,
			expectedShard:      0,
			expectedReplicas:   1,
			expectedErr:        nil,
		},
		{
			name: "Default sharding with deployment",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
			},
			cleanup:            func() {},
			useDynamicSharding: true,
			expectedShard:      0,
			expectedReplicas:   1,
			expectedErr:        nil,
		},
		{
			name: "Default sharding with deployment and multiple replicas",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
			},
			cleanup:            func() {},
			useDynamicSharding: true,
			expectedShard:      0,
			expectedReplicas:   3,
			expectedErr:        nil,
		},
		{
			name: "Statefulset multiple replicas",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvControllerReplicas, "3")
				osHostnameFunction = func() (string, error) { return "example-shard-3", nil }
			},
			cleanup: func() {
				osHostnameFunction = os.Hostname
			},
			useDynamicSharding: false,
			expectedShard:      3,
			expectedReplicas:   3,
			expectedErr:        nil,
		},
		{
			name: "Explicit shard with statefulset and 1 replica",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvControllerReplicas, "1")
				t.Setenv(common.EnvControllerShard, "3")
			},
			cleanup:            func() {},
			useDynamicSharding: false,
			expectedShard:      0,
			expectedReplicas:   1,
			expectedErr:        nil,
		},
		{
			name: "Explicit shard with statefulset and 2 replica - and to high shard",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvControllerReplicas, "2")
				t.Setenv(common.EnvControllerShard, "3")
			},
			cleanup:            func() {},
			useDynamicSharding: false,
			expectedShard:      0,
			expectedReplicas:   2,
			expectedErr:        nil,
		},
		{
			name: "Explicit shard with statefulset and 2 replica",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvControllerReplicas, "2")
				t.Setenv(common.EnvControllerShard, "1")
			},
			cleanup:            func() {},
			useDynamicSharding: false,
			expectedShard:      1,
			expectedReplicas:   2,
			expectedErr:        nil,
		},
		{
			name: "Explicit shard with deployment",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvControllerShard, "3")
			},
			cleanup:            func() {},
			useDynamicSharding: true,
			expectedShard:      0,
			expectedReplicas:   1,
			expectedErr:        nil,
		},
		{
			name: "Explicit shard with deployment and multiple replicas will read from configmap",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
				t.Setenv(common.EnvControllerShard, "3")
			},
			cleanup:            func() {},
			useDynamicSharding: true,
			expectedShard:      0,
			expectedReplicas:   3,
			expectedErr:        nil,
		},
		{
			name: "Dynamic sharding but missing deployment",
			envsSetter: func(t *testing.T) {
				t.Setenv(common.EnvAppControllerName, "missing-deployment")
			},
			cleanup:            func() {},
			useDynamicSharding: true,
			expectedShard:      0,
			expectedReplicas:   1,
			expectedErr:        fmt.Errorf("(dymanic cluster distribution) failed to get app controller deployment: deployments.apps \"missing-deployment\" not found"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tc.envsSetter(t)
			defer tc.cleanup()
			shardingCache, err := GetClusterSharding(kubeclientset, settingsMgr, "round-robin", tc.useDynamicSharding)

			if shardingCache != nil {
				clusterSharding := shardingCache.(*ClusterSharding)
				assert.Equal(t, tc.expectedShard, clusterSharding.Shard)
				assert.Equal(t, tc.expectedReplicas, clusterSharding.Replicas)
			}

			if tc.expectedErr != nil {
				if err != nil {
					assert.Equal(t, tc.expectedErr.Error(), err.Error())
				} else {
					t.Errorf("Expected error %v but got nil", tc.expectedErr)
				}
			} else {
				assert.Nil(t, err)
			}
		})
	}
}
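One point worth noting about the table above: `t.Setenv` scopes each environment override to the enclosing test and restores the prior value afterwards, which is what lets the cases mutate shard and replica variables without leaking into each other. A minimal sketch (the variable name is illustrative only):

```go
package sharding_test

import (
	"os"
	"testing"
)

func TestEnvScoping(t *testing.T) {
	t.Setenv("EXAMPLE_REPLICAS", "3") // restored automatically when the test ends
	if got := os.Getenv("EXAMPLE_REPLICAS"); got != "3" {
		t.Fatalf("expected override, got %q", got)
	}
}
```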
@@ -12,8 +12,8 @@ To be able to send notifications with argocd-notifications you have to create an
 8. Give your integration a name, copy the "API key" and safe it somewhere for later
 9. Make sure the checkboxes for "Create and Update Access" and "enable" are selected, disable the other checkboxes to remove unnecessary permissions
 10. Click "Safe Integration" at the bottom
-11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the us/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (european api).
-12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the opsgenie integration in the `argocd-notifications-secret` secret.
+11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the US/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API).
+12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the Opsgenie integration in the `argocd-notifications-secret` secret.

 ```yaml
 apiVersion: v1
@@ -1,17 +1,17 @@
# Pagerduty
# PagerDuty

## Parameters

The Pagerduty notification service is used to create pagerduty incidents and requires specifying the following settings:
The PagerDuty notification service is used to create PagerDuty incidents and requires specifying the following settings:

* `pagerdutyToken` - the pagerduty auth token
* `pagerdutyToken` - the PagerDuty auth token
* `from` - email address of a valid user associated with the account making the request.
* `serviceID` - The ID of the resource.


## Example

The following snippet contains sample Pagerduty service configuration:
The following snippet contains sample PagerDuty service configuration:

```yaml
apiVersion: v1
@@ -35,7 +35,7 @@ data:

## Template

[Notification templates](../templates.md) support specifying subject for pagerduty notifications:
[Notification templates](../templates.md) support specifying subject for PagerDuty notifications:

```yaml
apiVersion: v1
@@ -62,5 +62,5 @@ apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  annotations:
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "<serviceID for Pagerduty>"
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "<serviceID for PagerDuty>"
```

@@ -74,5 +74,5 @@ apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  annotations:
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "<serviceID for Pagerduty>"
    notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "<serviceID for PagerDuty>"
```

@@ -13,4 +13,4 @@ before enabling `managedNamespaceMetadata` on an existing namespace.

## Upgraded Helm Version

Note that bundled Helm version has been upgraded from 3.13.2 to 3.14.0.
Note that bundled Helm version has been upgraded from 3.13.2 to 3.14.2.

35
go.mod
@@ -14,9 +14,9 @@ require (
    github.com/alicebob/miniredis/v2 v2.30.4
    github.com/antonmedv/expr v1.15.2
    github.com/argoproj/gitops-engine v0.7.1-0.20240122213038-792124280fcc
    github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9
    github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604
    github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1
    github.com/aws/aws-sdk-go v1.44.317
    github.com/aws/aws-sdk-go v1.50.8
    github.com/bmatcuk/doublestar/v4 v4.6.0
    github.com/bombsimon/logrusr/v2 v2.0.1
    github.com/bradleyfalzon/ghinstallation/v2 v2.6.0
@@ -92,7 +92,7 @@ require (
    gopkg.in/yaml.v2 v2.4.0
    gopkg.in/yaml.v3 v3.0.1
    k8s.io/api v0.26.11
    k8s.io/apiextensions-apiserver v0.26.4
    k8s.io/apiextensions-apiserver v0.26.10
    k8s.io/apimachinery v0.26.11
    k8s.io/apiserver v0.26.11
    k8s.io/client-go v0.26.11
@@ -103,7 +103,7 @@ require (
    k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
    layeh.com/gopher-json v0.0.0-20190114024228-97fed8db8427
    oras.land/oras-go/v2 v2.3.0
    sigs.k8s.io/controller-runtime v0.14.6
    sigs.k8s.io/controller-runtime v0.14.7
    sigs.k8s.io/structured-merge-diff/v4 v4.4.1
    sigs.k8s.io/yaml v1.3.0
)
@@ -114,19 +114,20 @@ require (
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v0.5.2 // indirect
    github.com/aws/aws-sdk-go-v2 v1.17.3 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.18.8 // indirect
    github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect
    github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // indirect
    github.com/aws/smithy-go v1.13.5 // indirect
    github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.25.12 // indirect
    github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
    github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
    github.com/aws/smithy-go v1.19.0 // indirect
    github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
    github.com/google/s2a-go v0.1.4 // indirect
    github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect

66
go.sum
@@ -696,8 +696,8 @@ github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
github.com/argoproj/gitops-engine v0.7.1-0.20240122213038-792124280fcc h1:Fv94Mi2WvtvPkEH5WoWC3iy/VoQRLeSsE0hyg0n2UkY=
github.com/argoproj/gitops-engine v0.7.1-0.20240122213038-792124280fcc/go.mod h1:gWE8uROi7hIkWGNAVM+8FWkMfo0vZ03SLx/aFw/DBzg=
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9 h1:1lt0VXzmLK7Vv0kaeal3S6/JIfzPyBORkUWXhiqF3l0=
github.com/argoproj/notifications-engine v0.4.1-0.20231027194313-a8d185ecc0a9/go.mod h1:E/vv4+by868m0mmflaRfGBmKBtAupoF+mmyfekP8QCk=
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604 h1:pMfBao6Vm1Ax0xGIp9BWEia2nKkccHwV0dTEdrsFOpo=
github.com/argoproj/notifications-engine v0.4.1-0.20240126143042-84b9f7913604/go.mod h1:TsyusmXQWIL0ST7YMRG/ered7WlWDmbmnPpXnS2LJmM=
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1 h1:qsHwwOJ21K2Ao0xPju1sNuqphyMnMYkyB3ZLoLtxWpo=
github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1/go.mod h1:CZHlkyAD1/+FbEn6cB2DQTj48IoLGvEYsWEvtzP3238=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -713,35 +713,37 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU=
github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.50.8 h1:gY0WoOW+/Wz6XmYSgDH9ge3wnAevYDSQWPxxJvqAkP4=
github.com/aws/aws-sdk-go v1.50.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/config v1.18.8 h1:lDpy0WM8AHsywOnVrOHaSMfpaiV2igOw8D7svkFkXVA=
github.com/aws/aws-sdk-go-v2/config v1.18.8/go.mod h1:5XCmmyutmzzgkpk/6NYTjeWb6lgo9N170m1j6pQkIBs=
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 h1:vTrwTvv5qAwjWIGhZDSBH/oQHuIQjGmD232k01FUh6A=
github.com/aws/aws-sdk-go-v2/credentials v1.13.8/go.mod h1:lVa4OHbvgjVot4gmh1uouF1ubgexSCN92P6CJQpT0t8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k=
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 h1:tQoMg8i4nFAB70cJ4wiAYEiZRYo2P6uDmU2D6ys/igo=
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0/go.mod h1:jQhN5f4p3PALMNlUtfb/0wGIFlV7vGtJlPDVfxfNfPY=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 h1:/2gzjhQowRLarkkBOGPXSRnb8sQ2RVsjdG1C/UliK/c=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 h1:Jfly6mRxk2ZOSlbCvZfKNS7TukSx1mIzhSsqZ/IGSZI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0/go.mod h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 h1:kOO++CYo50RcTFISESluhWEi5Prhg+gaSs4whWabiZU=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/config v1.25.12 h1:mF4cMuNh/2G+d19nWnm1vJ/ak0qK6SbqF0KtSX9pxu0=
github.com/aws/aws-sdk-go-v2/config v1.25.12/go.mod h1:lOvvqtZP9p29GIjOTuA/76HiVk0c/s8qRcFRq2+E2uc=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 h1:tRNrFDGRm81e6nTX5Q4CFblea99eAfm0dxXazGpLceU=
github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7/go.mod h1:8GWUDux5Z2h6z2efAtr54RdHXtLm8sq7Rg85ZNY/CZM=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -2704,8 +2706,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
sigs.k8s.io/controller-runtime v0.14.7 h1:Vrnm2vk9ZFlRkXATHz0W0wXcqNl7kPat8q2JyxVy0Q8=
sigs.k8s.io/controller-runtime v0.14.7/go.mod h1:ErTs3SJCOujNUnTz4AS+uh8hp6DHMo1gj6fFndJT1X8=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=

@@ -0,0 +1 @@
75496ea824f92305ff7d28af37f4af57536bf5138399c824dff997b9d239dd42 helm-v3.14.1-linux-amd64.tar.gz
@@ -0,0 +1 @@
f865b8ad4228fd0990bbc5b50615eb6cb9eb31c9a9ca7238401ed897bbbe9033 helm-v3.14.1-linux-arm64.tar.gz
@@ -0,0 +1 @@
4d853ab8fe3462287c7272fbadd5f73531ecdd6fa0db37d31630e41ae1ae21de helm-v3.14.1-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
19bf07999c7244bfeb0fd27152919b9faa1148cf43910edbb98efa9150058a98 helm-v3.14.1-linux-s390x.tar.gz
@@ -0,0 +1 @@
0885a501d586c1e949e9b113bf3fb3290b0bbf74db9444a1d8c2723a143006a5 helm-v3.14.2-linux-amd64.tar.gz
@@ -0,0 +1 @@
c65d6a9557bb359abc2c0d26670de850b52327dc3976ad6f9e14c298ea3e1b61 helm-v3.14.2-linux-arm64.tar.gz
@@ -0,0 +1 @@
f3bc8582ff151e619cd285d9cdf9fef1c5733ee5522d8bed2ef680ef07f87223 helm-v3.14.2-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
7bda34aa26638e5116b31385f3b781172572175bf4c1ae00c87d8b154458ed94 helm-v3.14.2-linux-s390x.tar.gz
@@ -11,7 +11,7 @@
# Use ./hack/installers/checksums/add-helm-checksums.sh and
# add-kustomize-checksums.sh to help download checksums.
###############################################################################
helm3_version=3.14.0
helm3_version=3.14.2
kubectl_version=1.17.8
kubectx_version=0.6.3
kustomize5_version=5.2.1

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
  newTag: v2.10.0
  newTag: v2.10.2
resources:
- ./application-controller
- ./dex

@@ -0,0 +1,88 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
    app.kubernetes.io/component: applicationset-controller
  name: argocd-applicationset-controller
rules:
- apiGroups:
  - argoproj.io
  resources:
  - applications
  - applicationsets
  - applicationsets/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - argoproj.io
  resources:
  - applicationsets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - argoproj.io
  resources:
  - appprojects
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - update
  - delete
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
    app.kubernetes.io/component: applicationset-controller
  name: argocd-applicationset-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-applicationset-controller
subjects:
- kind: ServiceAccount
  name: argocd-applicationset-controller
  namespace: argocd
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- argocd-applicationset-controller-clusterrole.yaml
- argocd-applicationset-controller-clusterrolebinding.yaml
@@ -3,4 +3,5 @@ kind: Kustomization

resources:
- ./application-controller
- ./applicationset-controller
- ./server

@@ -21026,7 +21026,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -21350,7 +21350,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -21402,7 +21402,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -21663,7 +21663,7 @@ spec:
              key: controller.diff.server.side
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
  newTag: v2.10.0
  newTag: v2.10.2

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
  newTag: v2.10.0
  newTag: v2.10.2
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

@@ -20855,6 +20855,95 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: applicationset-controller
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-applicationset-controller
rules:
- apiGroups:
  - argoproj.io
  resources:
  - applications
  - applicationsets
  - applicationsets/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - argoproj.io
  resources:
  - applicationsets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - argoproj.io
  resources:
  - appprojects
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - update
  - delete
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -21036,6 +21125,23 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: applicationset-controller
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-applicationset-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-applicationset-controller
subjects:
- kind: ServiceAccount
  name: argocd-applicationset-controller
  namespace: argocd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -22283,7 +22389,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -22406,7 +22512,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -22488,7 +22594,7 @@ spec:
              key: notificationscontroller.selfservice.enabled
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -22843,7 +22949,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -22895,7 +23001,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -23214,7 +23320,7 @@ spec:
              key: server.api.content.types
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -23502,7 +23608,7 @@ spec:
              key: controller.diff.server.side
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -1668,7 +1668,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -1791,7 +1791,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -1873,7 +1873,7 @@ spec:
              key: notificationscontroller.selfservice.enabled
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -2228,7 +2228,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -2280,7 +2280,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -2599,7 +2599,7 @@ spec:
              key: server.api.content.types
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -2887,7 +2887,7 @@ spec:
              key: controller.diff.server.side
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -20814,6 +20814,95 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: applicationset-controller
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-applicationset-controller
rules:
- apiGroups:
  - argoproj.io
  resources:
  - applications
  - applicationsets
  - applicationsets/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - argoproj.io
  resources:
  - applicationsets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - argoproj.io
  resources:
  - appprojects
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - update
  - delete
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -20963,6 +21052,23 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: applicationset-controller
    app.kubernetes.io/name: argocd-applicationset-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-applicationset-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-applicationset-controller
subjects:
- kind: ServiceAccount
  name: argocd-applicationset-controller
  namespace: argocd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: server
@@ -21378,7 +21484,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -21501,7 +21607,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -21583,7 +21689,7 @@ spec:
              key: notificationscontroller.selfservice.enabled
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -21889,7 +21995,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -21941,7 +22047,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -22258,7 +22364,7 @@ spec:
              key: server.api.content.types
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -22546,7 +22652,7 @@ spec:
              key: controller.diff.server.side
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -763,7 +763,7 @@ spec:
              key: applicationsetcontroller.enable.scm.providers
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-applicationset-controller
        ports:
@@ -886,7 +886,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: copyutil
        securityContext:
@@ -968,7 +968,7 @@ spec:
              key: notificationscontroller.selfservice.enabled
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          tcpSocket:
@@ -1274,7 +1274,7 @@ spec:
          value: /helm-working-dir
        - name: HELM_DATA_HOME
          value: /helm-working-dir
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
@@ -1326,7 +1326,7 @@ spec:
        - -n
        - /usr/local/bin/argocd
        - /var/run/argocd/argocd-cmp-server
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        name: copyutil
        securityContext:
          allowPrivilegeEscalation: false
@@ -1643,7 +1643,7 @@ spec:
              key: server.api.content.types
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
@@ -1931,7 +1931,7 @@ spec:
              key: controller.diff.server.side
              name: argocd-cmd-params-cm
              optional: true
        image: quay.io/argoproj/argocd:v2.10.0
        image: quay.io/argoproj/argocd:v2.10.2
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:

@@ -1389,7 +1389,7 @@ func GenerateManifests(ctx context.Context, appPath, repoRoot, revision string,
        if q.KustomizeOptions != nil {
            kustomizeBinary = q.KustomizeOptions.BinaryPath
        }
        k := kustomize.NewKustomizeApp(appPath, q.Repo.GetGitCreds(gitCredsStore), repoURL, kustomizeBinary)
        k := kustomize.NewKustomizeApp(repoRoot, appPath, q.Repo.GetGitCreds(gitCredsStore), repoURL, kustomizeBinary)
        targetObjs, _, err = k.Build(q.ApplicationSource.Kustomize, q.KustomizeOptions, env)
    case v1alpha1.ApplicationSourceTypePlugin:
        pluginName := ""
@@ -1976,7 +1976,7 @@ func (s *Service) GetAppDetails(ctx context.Context, q *apiclient.RepoServerAppD
            return err
        }
    case v1alpha1.ApplicationSourceTypeKustomize:
        if err := populateKustomizeAppDetails(res, q, opContext.appPath, commitSHA, s.gitCredsStore); err != nil {
        if err := populateKustomizeAppDetails(res, q, repoRoot, opContext.appPath, commitSHA, s.gitCredsStore); err != nil {
            return err
        }
    case v1alpha1.ApplicationSourceTypePlugin:
@@ -2117,13 +2117,13 @@ func walkHelmValueFilesInPath(root string, valueFiles *[]string) filepath.WalkFu
    }
}

func populateKustomizeAppDetails(res *apiclient.RepoAppDetailsResponse, q *apiclient.RepoServerAppDetailsQuery, appPath string, reversion string, credsStore git.CredsStore) error {
func populateKustomizeAppDetails(res *apiclient.RepoAppDetailsResponse, q *apiclient.RepoServerAppDetailsQuery, repoRoot string, appPath string, reversion string, credsStore git.CredsStore) error {
    res.Kustomize = &apiclient.KustomizeAppSpec{}
    kustomizeBinary := ""
    if q.KustomizeOptions != nil {
        kustomizeBinary = q.KustomizeOptions.BinaryPath
    }
    k := kustomize.NewKustomizeApp(appPath, q.Repo.GetGitCreds(credsStore), q.Repo.Repo, kustomizeBinary)
    k := kustomize.NewKustomizeApp(repoRoot, appPath, q.Repo.GetGitCreds(credsStore), q.Repo.Repo, kustomizeBinary)
    fakeManifestRequest := apiclient.ManifestRequest{
        AppName: q.AppName,
        Namespace: "", // FIXME: omit it for now

@@ -748,7 +748,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
        Then().
        Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
        And(func(app *Application) {
            diffOutput, err := RunCli("app", "diff", ctx.AppQualifiedName(), "--local", "testdata/guestbook")
            diffOutput, err := RunCli("app", "diff", ctx.AppQualifiedName(), "--local-repo-root", ".", "--local", "testdata/guestbook")
            assert.Error(t, err)
            assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", DeploymentNamespace()))
        }).
@@ -761,7 +761,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
        Then().
        Expect(SyncStatusIs(SyncStatusCodeSynced)).
        And(func(app *Application) {
            diffOutput, err := RunCli("app", "diff", ctx.AppQualifiedName(), "--local", "testdata/guestbook")
            diffOutput, err := RunCli("app", "diff", ctx.AppQualifiedName(), "--local-repo-root", ".", "--local", "testdata/guestbook")
            assert.NoError(t, err)
            assert.Empty(t, diffOutput)
        }).
@@ -897,7 +897,7 @@ func testNSEdgeCasesApplicationResources(t *testing.T, appPath string, statusCod
    expect.
        Expect(HealthIs(statusCode)).
        And(func(app *Application) {
            diffOutput, err := RunCli("app", "diff", ctx.AppQualifiedName(), "--local", path.Join("testdata", appPath))
            diffOutput, err := RunCli("app", "diff", ctx.AppQualifiedName(), "--local-repo-root", ".", "--local", path.Join("testdata", appPath))
            assert.Empty(t, diffOutput)
            assert.NoError(t, err)
        })
@@ -998,7 +998,7 @@ func TestNamespacedLocalManifestSync(t *testing.T) {
        Given().
        LocalPath(guestbookPathLocal).
        When().
        Sync().
        Sync("--local-repo-root", ".").
        Then().
        Expect(SyncStatusIs(SyncStatusCodeSynced)).
        And(func(app *Application) {
@@ -1066,7 +1066,7 @@ func TestNamespacedLocalSyncDryRunWithASEnabled(t *testing.T) {
            assert.NoError(t, err)

            appBefore := app.DeepCopy()
            _, err = RunCli("app", "sync", app.QualifiedName(), "--dry-run", "--local", guestbookPathLocal)
            _, err = RunCli("app", "sync", app.QualifiedName(), "--dry-run", "--local-repo-root", ".", "--local", guestbookPathLocal)
            assert.NoError(t, err)

            appAfter := app.DeepCopy()

@@ -1324,7 +1324,7 @@ func TestLocalManifestSync(t *testing.T) {
        Given().
        LocalPath(guestbookPathLocal).
        When().
        Sync().
        Sync("--local-repo-root", ".").
        Then().
        Expect(SyncStatusIs(SyncStatusCodeSynced)).
        And(func(app *Application) {
@@ -1385,7 +1385,7 @@ func TestLocalSyncDryRunWithAutosyncEnabled(t *testing.T) {
    assert.NoError(t, err)

    appBefore := app.DeepCopy()
    _, err = RunCli("app", "sync", app.Name, "--dry-run", "--local", guestbookPathLocal)
    _, err = RunCli("app", "sync", app.Name, "--dry-run", "--local-repo-root", ".", "--local", guestbookPathLocal)
    assert.NoError(t, err)

    appAfter := app.DeepCopy()

@@ -3,7 +3,7 @@
.applications-tiles {
    display: grid;
    gap: 24px;
    grid-template-columns: repeat(auto-fill,minmax(380px,1fr));
    grid-template-columns: repeat(auto-fill,minmax(370px,1fr));
    padding: 0 12px;

    &__wrapper {

@@ -105,9 +105,7 @@ export const ApplicationTiles = ({applications, syncApplication, refreshApplicat
{pref => {
    const favList = pref.appList.favoritesAppList || [];
    return (
        <div
            className='applications-tiles argo-table-list argo-table-list--clickable row small-up-1 medium-up-2 large-up-3 xxxlarge-up-4'
            ref={appContainerRef}>
        <div className='applications-tiles argo-table-list argo-table-list--clickable' ref={appContainerRef}>
            {applications.map((app, i) => {
                const source = getAppDefaultSource(app);
                return (

13
util/env/env.go
vendored
@@ -151,8 +151,17 @@ func ParseDurationFromEnv(env string, defaultValue, min, max time.Duration) time
    return dur
}

func StringFromEnv(env string, defaultValue string) string {
    if str := os.Getenv(env); str != "" {
type StringFromEnvOpts struct {
    // AllowEmpty allows the value to be empty as long as the environment variable is set.
    AllowEmpty bool
}

func StringFromEnv(env string, defaultValue string, opts ...StringFromEnvOpts) string {
    opt := StringFromEnvOpts{}
    for _, o := range opts {
        opt.AllowEmpty = opt.AllowEmpty || o.AllowEmpty
    }
    if str, ok := os.LookupEnv(env); opt.AllowEmpty && ok || str != "" {
        return str
    }
    return defaultValue

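The `AllowEmpty` option changes the lookup from `os.Getenv` to `os.LookupEnv`, so an explicitly set but empty variable can now win over the default. A minimal sketch of how a caller might use it; the import path and the `ARGOCD_EXAMPLE` variable name are assumptions for illustration, not part of this diff:

```go
package main

import (
    "fmt"
    "os"

    "github.com/argoproj/argo-cd/v2/util/env"
)

func main() {
    // Unset variable: AllowEmpty has no effect, the default is returned.
    os.Unsetenv("ARGOCD_EXAMPLE")
    fmt.Println(env.StringFromEnv("ARGOCD_EXAMPLE", "fallback", env.StringFromEnvOpts{AllowEmpty: true})) // "fallback"

    // Set but empty: with AllowEmpty the empty value overrides the default.
    os.Setenv("ARGOCD_EXAMPLE", "")
    fmt.Println(env.StringFromEnv("ARGOCD_EXAMPLE", "fallback", env.StringFromEnvOpts{AllowEmpty: true})) // ""

    // Without AllowEmpty, an empty value still falls back to the default.
    fmt.Println(env.StringFromEnv("ARGOCD_EXAMPLE", "fallback")) // "fallback"
}
```

The last case shows why the option exists: without it, `StringFromEnv` cannot distinguish an unset variable from one deliberately set to the empty string.
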
19
util/env/env_test.go
vendored
@@ -7,6 +7,7 @@ import (
    "time"

    "github.com/stretchr/testify/assert"
    "k8s.io/utils/pointer"
)

func TestParseNumFromEnv(t *testing.T) {
@@ -167,19 +168,25 @@ func TestStringFromEnv(t *testing.T) {

    testCases := []struct {
        name string
        env string
        env *string
        expected string
        def string
        opts []StringFromEnvOpts
    }{
        {"Some string", "true", "true", def},
        {"Empty string with default", "", def, def},
        {"Empty string without default", "", "", ""},
        {"Some string", pointer.String("true"), "true", def, nil},
        {"Empty string with default", pointer.String(""), def, def, nil},
        {"Empty string without default", pointer.String(""), "", "", nil},
        {"No env variable with default allow empty", nil, "default", "default", []StringFromEnvOpts{{AllowEmpty: true}}},
        {"Some variable with default allow empty", pointer.String("true"), "true", "default", []StringFromEnvOpts{{AllowEmpty: true}}},
        {"Empty variable with default allow empty", pointer.String(""), "", "default", []StringFromEnvOpts{{AllowEmpty: true}}},
    }

    for _, tt := range testCases {
        t.Run(tt.name, func(t *testing.T) {
            t.Setenv(envKey, tt.env)
            b := StringFromEnv(envKey, tt.def)
            if tt.env != nil {
                t.Setenv(envKey, *tt.env)
            }
            b := StringFromEnv(envKey, tt.def, tt.opts...)
            assert.Equal(t, tt.expected, b)
        })
    }

@@ -91,6 +91,28 @@ func (c *Cmd) RegistryLogin(repo string, creds Creds) (string, error) {
        args = append(args, "--password", creds.Password)
    }

    if creds.CAPath != "" {
        args = append(args, "--ca-file", creds.CAPath)
    }

    if len(creds.CertData) > 0 {
        filePath, closer, err := writeToTmp(creds.CertData)
        if err != nil {
            return "", err
        }
        defer argoio.Close(closer)
        args = append(args, "--cert-file", filePath)
    }

    if len(creds.KeyData) > 0 {
        filePath, closer, err := writeToTmp(creds.KeyData)
        if err != nil {
            return "", err
        }
        defer argoio.Close(closer)
        args = append(args, "--key-file", filePath)
    }

    if creds.InsecureSkipVerify {
        args = append(args, "--insecure")
    }
@@ -238,6 +260,25 @@ func (c *Cmd) PullOCI(repo string, chart string, version string, destination str
    if creds.CAPath != "" {
        args = append(args, "--ca-file", creds.CAPath)
    }

    if len(creds.CertData) > 0 {
        filePath, closer, err := writeToTmp(creds.CertData)
        if err != nil {
            return "", err
        }
        defer argoio.Close(closer)
        args = append(args, "--cert-file", filePath)
    }

    if len(creds.KeyData) > 0 {
        filePath, closer, err := writeToTmp(creds.KeyData)
        if err != nil {
            return "", err
        }
        defer argoio.Close(closer)
        args = append(args, "--key-file", filePath)
    }

    if creds.InsecureSkipVerify && c.insecureSkipVerifySupported {
        args = append(args, "--insecure-skip-tls-verify")
    }

@@ -35,8 +35,9 @@ type Kustomize interface {
}

// NewKustomizeApp create a new wrapper to run commands on the `kustomize` command-line tool.
func NewKustomizeApp(path string, creds git.Creds, fromRepo string, binaryPath string) Kustomize {
func NewKustomizeApp(repoRoot string, path string, creds git.Creds, fromRepo string, binaryPath string) Kustomize {
    return &kustomize{
        repoRoot: repoRoot,
        path: path,
        creds: creds,
        repo: fromRepo,
@@ -45,6 +46,8 @@ func NewKustomizeApp(path string, creds git.Creds, fromRepo string, binaryPath s
}

type kustomize struct {
    // path to the Git repository root
    repoRoot string
    // path inside the checked out tree
    path string
    // creds structure
@@ -301,6 +304,7 @@ func (k *kustomize) Build(opts *v1alpha1.ApplicationSourceKustomize, kustomizeOp
        cmd = exec.Command(k.getBinaryPath(), "build", k.path)
    }
    cmd.Env = env
    cmd.Dir = k.repoRoot
    out, err := executil.Run(cmd)
    if err != nil {
        return nil, nil, err

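Splitting `repoRoot` from `path` lets `Build` run kustomize with `cmd.Dir` set to the repository root while still building the app directory, so relative references such as components stay resolvable within the checkout. A minimal sketch of the updated call, assuming hypothetical checkout paths and the import paths used elsewhere in this diff:

```go
package main

import (
    "fmt"
    "log"

    "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
    "github.com/argoproj/argo-cd/v2/util/git"
    "github.com/argoproj/argo-cd/v2/util/kustomize"
)

func main() {
    repoRoot := "/tmp/checkout"               // hypothetical local checkout of the repo
    appPath := "/tmp/checkout/apps/guestbook" // kustomization directory inside it

    // repoRoot is passed separately so Build executes from the repository root
    // while "kustomize build" still targets the app path.
    k := kustomize.NewKustomizeApp(repoRoot, appPath, git.NopCreds{}, "", "")

    objs, images, err := k.Build(&v1alpha1.ApplicationSourceKustomize{}, nil, &v1alpha1.Env{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(objs), images)
}
```

Passing the same directory for both arguments, as the updated tests below do, preserves the old behavior of building from the app path itself.
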
@@ -40,7 +40,7 @@ func TestKustomizeBuild(t *testing.T) {
    namePrefix := "namePrefix-"
    nameSuffix := "-nameSuffix"
    namespace := "custom-namespace"
    kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", "")
    kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "")
    env := &v1alpha1.Env{
        &v1alpha1.EnvEntry{Name: "ARGOCD_APP_NAME", Value: "argo-cd-tests"},
    }
@@ -123,7 +123,7 @@
func TestFailKustomizeBuild(t *testing.T) {
    appPath, err := testDataDir(t, kustomization1)
    assert.Nil(t, err)
    kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", "")
    kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "")
    kustomizeSource := v1alpha1.ApplicationSourceKustomize{
        Replicas: []v1alpha1.KustomizeReplica{
            {
@@ -222,7 +222,7 @@ func TestKustomizeBuildForceCommonLabels(t *testing.T) {
    for _, tc := range testCases {
        appPath, err := testDataDir(t, tc.TestData)
        assert.Nil(t, err)
        kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", "")
        kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "")
        objs, _, err := kustomize.Build(&tc.KustomizeSource, nil, tc.Env)
        switch tc.ExpectErr {
        case true:
@@ -314,7 +314,7 @@ func TestKustomizeBuildForceCommonAnnotations(t *testing.T) {
    for _, tc := range testCases {
        appPath, err := testDataDir(t, tc.TestData)
        assert.Nil(t, err)
        kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", "")
        kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "")
        objs, _, err := kustomize.Build(&tc.KustomizeSource, nil, tc.Env)
        switch tc.ExpectErr {
        case true:
@@ -334,7 +334,7 @@ func TestKustomizeCustomVersion(t *testing.T) {
    kustomizePath, err := testDataDir(t, kustomization4)
    assert.Nil(t, err)
    envOutputFile := kustomizePath + "/env_output"
    kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", kustomizePath+"/kustomize.special")
    kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", kustomizePath+"/kustomize.special")
    kustomizeSource := v1alpha1.ApplicationSourceKustomize{
        Version: "special",
    }
@@ -356,7 +356,7 @@
func TestKustomizeBuildComponents(t *testing.T) {
    appPath, err := testDataDir(t, kustomization6)
    assert.Nil(t, err)
    kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", "")
    kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "")

    kustomizeSource := v1alpha1.ApplicationSourceKustomize{
        Components: []string{"./components"},
@@ -377,7 +377,7 @@
func TestKustomizeBuildPatches(t *testing.T) {
    appPath, err := testDataDir(t, kustomization5)
    assert.Nil(t, err)
    kustomize := NewKustomizeApp(appPath, git.NopCreds{}, "", "")
    kustomize := NewKustomizeApp(appPath, appPath, git.NopCreds{}, "", "")

    kustomizeSource := v1alpha1.ApplicationSourceKustomize{
        Patches: []v1alpha1.KustomizePatch{
            {