Compare commits
1 commit
commit-ser...temp-cherr

| Author | SHA1 | Date |
|---|---|---|
|  | a579092c62 |  |

@@ -18,10 +18,8 @@ hack/
docs/
examples/
.github/
!test/container
!test/e2e/testdata
!test/fixture
!test/remote
!test/container
!hack/installers
!hack/gpg-wrapper.sh
!hack/git-verify-wrapper.sh

.github/workflows/ci-build.yaml (1 change)

@@ -360,7 +360,6 @@ jobs:
name: Run end-to-end tests
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
needs:

@@ -2,9 +2,7 @@
** @argoproj/argocd-approvers

# Docs
/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs

# CI
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci

@@ -1 +0,0 @@
Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/code-contributions/)

Makefile (13 changes)

@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=

ARGOCD_E2E_TEST_TIMEOUT?=60m
ARGOCD_E2E_TEST_TIMEOUT?=45m

ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true

@@ -386,9 +386,9 @@ test: test-tools-image
.PHONY: test-local
test-local:
if test "$(TEST_MODULE)" = ""; then \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -coverprofile=coverage.out; \
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi

.PHONY: test-race

@@ -400,9 +400,9 @@ test-race: test-tools-image
.PHONY: test-race-local
test-race-local:
if test "$(TEST_MODULE)" = ""; then \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -race -coverprofile=coverage.out; \
./hack/test.sh -race -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -race -coverprofile=coverage.out; \
./hack/test.sh -race -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi

# Run the E2E test suite. E2E test servers (see start-e2e target) must be

@@ -416,7 +416,7 @@ test-e2e:
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v
ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v ./test/e2e

# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image

@@ -557,7 +557,6 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
install-test-tools-local:
./hack/install.sh kustomize
./hack/install.sh helm-linux
./hack/install.sh gotestsum

# Installs all tools required for running codegen (Linux packages)
.PHONY: install-codegen-tools-local

USERS.md (6 changes)

@@ -25,7 +25,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
1. [Autodesk](https://www.autodesk.com)
2. [Autodesk](https://www.autodesk.com)
1. [Axual B.V.](https://axual.com)
1. [Back Market](https://www.backmarket.com)
1. [Baloise](https://www.baloise.com)

@@ -169,7 +169,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Meican](https://meican.com/)
1. [Meilleurs Agents](https://www.meilleursagents.com/)
1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/)
1. [Mercedes-Benz.io](https://www.mercedes-benz.io/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)

@@ -210,7 +209,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Patreon](https://www.patreon.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Percona](https://percona.com/)
1. [PGS](https://www.pgs.com)
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)

@@ -245,9 +243,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Sap Labs](http://sap.com)
1. [Sauce Labs](https://saucelabs.com/)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [SCRM Lidl International Hub](https://scrm.lidl)
1. [SEEK](https://seek.com.au)
1. [Semgrep](https://semgrep.com)
1. [SI Analytics](https://si-analytics.ai)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)

@@ -16,6 +16,7 @@ package controllers
import (
"context"
"encoding/json"
"fmt"
"reflect"
"time"

@@ -24,6 +25,7 @@ import (
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"

@@ -44,6 +46,7 @@ import (
"github.com/argoproj/argo-cd/v2/applicationset/generators"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/glob"

@@ -114,7 +117,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Log a warning if there are unrecognized generators
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
desiredApplications, applicationSetReason, err := r.generateApplications(logCtx, applicationSetInfo)
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,

@@ -161,9 +164,9 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if r.EnableProgressiveSyncs {
if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
// If appset used progressive sync but stopped, clean up the progressive sync application statuses
logCtx.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)

err := r.setAppSetApplicationStatus(ctx, logCtx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
}

@@ -178,7 +181,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
appMap[app.Name] = app
}

appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, applications, desiredApplications, appMap)
appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
}

@@ -216,7 +219,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if r.EnableProgressiveSyncs {
// trigger appropriate application syncs if RollingSync strategy is enabled
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
validApps, err = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)

if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,

@@ -234,7 +237,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}

if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
err = r.createOrUpdateInCluster(ctx, logCtx, applicationSetInfo, validApps)
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,

@@ -248,7 +251,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}
} else {
err = r.createInCluster(ctx, logCtx, applicationSetInfo, validApps)
err = r.createInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,

@@ -264,7 +267,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}

if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, desiredApplications)
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,

@@ -489,7 +492,7 @@ func getTempApplication(applicationSetTemplate argov1alpha1.ApplicationSetTempla
return &tmplApplication
}

func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) {
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) {
var res []argov1alpha1.Application

var firstError error

@@ -498,7 +501,7 @@ func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, appli
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo, map[string]interface{}{})
if err != nil {
logCtx.WithError(err).WithField("generator", requestedGenerator).
log.WithError(err).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err

@@ -513,7 +516,7 @@ func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, appli
for _, p := range a.Params {
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
if err != nil {
logCtx.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
Error("error generating application from params")

if firstError == nil {

@@ -526,8 +529,8 @@ func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, appli
}
}

logCtx.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
logCtx.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
}

return res, applicationSetReason, firstError

@@ -541,24 +544,22 @@ func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
}
}

func appControllerIndexer(rawObj client.Object) []string {
// grab the job object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's a application set...
if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}

// ...and if so, return it
return []string{owner.Name}
}

func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", appControllerIndexer); err != nil {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
// grab the job object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's a application set...
if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}

// ...and if so, return it
return []string{owner.Name}
}); err != nil {
return fmt.Errorf("error setting up with manager: %w", err)
}
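
For readers unfamiliar with controller-runtime field indexes: the `.metadata.controller` index registered above (and mirrored in the tests below via `fake.NewClientBuilder().WithIndex(...)`) lets the cached client list only the Applications owned by a given ApplicationSet. A minimal sketch of how such an index is typically consumed; `applicationsOwnedBy` is a hypothetical helper for illustration, not part of this change:

```go
package example

import (
	"context"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applicationsOwnedBy lists the Applications whose controller owner reference
// points at the named ApplicationSet, using the ".metadata.controller" field
// index that SetupWithManager registers above.
func applicationsOwnedBy(ctx context.Context, c client.Client, namespace, appSetName string) ([]argov1alpha1.Application, error) {
	var apps argov1alpha1.ApplicationList
	err := c.List(ctx, &apps,
		client.InNamespace(namespace),
		client.MatchingFields{".metadata.controller": appSetName},
	)
	return apps.Items, err
}
```
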

@@ -602,14 +603,14 @@ func (r *ApplicationSetReconciler) updateCache(ctx context.Context, obj client.O
// - For new applications, it will call create
// - For existing application, it will call update
// The function also adds owner reference to all applications, and uses it to delete them.
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {

var firstError error
// Creates or updates the application in appList
for _, generatedApp := range desiredApplications {
generatedApp.Namespace = applicationSet.Namespace

appLog := logCtx.WithFields(log.Fields{"app": generatedApp.QualifiedName()})
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
generatedApp.Namespace = applicationSet.Namespace

// Normalize to avoid fighting with the application controller.
generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec)

@@ -625,7 +626,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
},
}

action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, found, func() error {
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
found.Spec = generatedApp.Spec

@@ -678,6 +679,13 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
found.ObjectMeta.Finalizers = generatedApp.Finalizers
found.ObjectMeta.Labels = generatedApp.Labels

if found != nil && len(found.Spec.IgnoreDifferences) > 0 {
err := applyIgnoreDifferences(applicationSet.Spec.IgnoreApplicationDifferences, found, generatedApp)
if err != nil {
return fmt.Errorf("failed to apply ignore differences: %w", err)
}
}

return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})

@@ -689,23 +697,63 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
continue
}
r.updateCache(ctx, found, appLog)

if action != controllerutil.OperationResultNone {
// Don't pollute etcd with "unchanged Application" events
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
appLog.Logf(log.InfoLevel, "%s Application", action)
} else {
// "unchanged Application" can be inferred by Reconcile Complete with no action being listed
// Or enable debug logging
appLog.Logf(log.DebugLevel, "%s Application", action)
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
appLog.Logf(log.InfoLevel, "%s Application", action)
}
return firstError
}

// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the found application in place.
func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp argov1alpha1.Application) error {
diffConfig, err := argodiff.NewDiffConfigBuilder().
WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
WithNoCache().
Build()
if err != nil {
return fmt.Errorf("failed to build diff config: %w", err)
}
unstructuredFound, err := appToUnstructured(found)
if err != nil {
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
}
unstructuredGenerated, err := appToUnstructured(&generatedApp)
if err != nil {
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
}
result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
if err != nil {
return fmt.Errorf("failed to normalize application spec: %w", err)
}
if len(result.Targets) != 1 {
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
}
jsonNormalized, err := json.Marshal(result.Targets[0].Object)
if err != nil {
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
}
err = json.Unmarshal(jsonNormalized, &found)
if err != nil {
return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
}
// Prohibit jq queries from mutating silly things.
found.TypeMeta = generatedApp.TypeMeta
found.Name = generatedApp.Name
found.Namespace = generatedApp.Namespace
found.Operation = generatedApp.Operation
return nil
}

func appToUnstructured(app *argov1alpha1.Application) (*unstructured.Unstructured, error) {
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
if err != nil {
return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
}
return &unstructured.Unstructured{Object: u}, nil
}

// createInCluster will filter from the desiredApplications only the application that needs to be created
// Then it will call createOrUpdateInCluster to do the actual create
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {

var createApps []argov1alpha1.Application
current, err := r.getCurrentApplications(ctx, applicationSet)

@@ -728,7 +776,7 @@ func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, logCtx *
}
}

return r.createOrUpdateInCluster(ctx, logCtx, applicationSet, createApps)
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
}

func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {

@@ -745,7 +793,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, app

// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators had been called and generated applications
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
// clusterList, err := argoDB.ListClusters(ctx)

@@ -769,15 +817,15 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
// Delete apps that are not in m[string]bool
var firstError error
for _, app := range current {
logCtx = logCtx.WithField("app", app.QualifiedName())
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
_, exists := m[app.Name]

if !exists {

// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, logCtx)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
if err != nil {
logCtx.WithError(err).Error("failed to update Application")
appLog.WithError(err).Error("failed to update Application")
if firstError != nil {
firstError = err
}

@@ -786,14 +834,14 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *

err = r.Client.Delete(ctx, &app)
if err != nil {
logCtx.WithError(err).Error("failed to delete Application")
appLog.WithError(err).Error("failed to delete Application")
if firstError != nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
logCtx.Log(log.InfoLevel, "Deleted application")
appLog.Log(log.InfoLevel, "Deleted application")
}
}
return firstError

@@ -856,11 +904,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
if len(newFinalizers) != len(app.Finalizers) {
updated := app.DeepCopy()
updated.Finalizers = newFinalizers
patch := client.MergeFrom(app)
if log.IsLevelEnabled(log.DebugLevel) {
utils.LogPatch(appLog, patch, updated)
}
if err := r.Client.Patch(ctx, updated, patch); err != nil {
if err := r.Client.Patch(ctx, updated, client.MergeFrom(app)); err != nil {
return fmt.Errorf("error updating finalizers: %w", err)
}
r.updateCache(ctx, updated, appLog)

@@ -875,21 +919,21 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
return nil
}

func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {

appDependencyList, appStepMap, err := r.buildAppDependencyList(logCtx, appset, desiredApplications)
appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
if err != nil {
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
}

_, err = r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
_, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
}

logCtx.Infof("ApplicationSet %v step list:", appset.Name)
log.Infof("ApplicationSet %v step list:", appset.Name)
for i, step := range appDependencyList {
logCtx.Infof("step %v: %+v", i+1, step)
log.Infof("step %v: %+v", i+1, step)
}

appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)

@@ -897,9 +941,9 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
return nil, fmt.Errorf("failed to build app sync map: %w", err)
}

logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)

_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap, appMap)
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
}

@@ -913,7 +957,7 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
}

// this list tracks which Applications belong to each RollingUpdate step
func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {

if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
return [][]string{}, map[string]int{}, nil

@@ -940,9 +984,9 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
for _, matchExpression := range step.MatchExpressions {

if val, ok := app.Labels[matchExpression.Key]; ok {
valueMatched := labelMatchedExpression(logCtx, val, matchExpression)
valueMatched := labelMatchedExpression(val, matchExpression)

if !valueMatched { // none of the matchExpression values was a match with the Application's labels
if !valueMatched { // none of the matchExpression values was a match with the Application'ss labels
selected = false
break
}

@@ -955,7 +999,7 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
if selected {
appDependencyList[i] = append(appDependencyList[i], app.Name)
if val, ok := appStepMap[app.Name]; ok {
logCtx.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
log.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
} else {
appStepMap[app.Name] = i
}

@@ -966,9 +1010,9 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
return appDependencyList, appStepMap, nil
}

func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
if matchExpression.Operator != "In" && matchExpression.Operator != "NotIn" {
logCtx.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
log.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
return false
}

@@ -1072,7 +1116,7 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
}

// check the status of each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {

now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))

@@ -1105,7 +1149,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}

if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
logCtx.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."

@@ -1117,15 +1161,15 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
// this covers race conditions where syncs initiated by RollingSync miraculously have a sync time before the transition to Pending state occurred (could be a few seconds)
if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.Add(time.Duration(10)*time.Second).After(currentAppStatus.LastTransitionTime.Time) {
if !app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
logCtx.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
log.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
}
logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."

@@ -1134,7 +1178,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}

if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
logCtx.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."

@@ -1142,7 +1186,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}

if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
logCtx.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."

@@ -1152,7 +1196,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
appStatuses = append(appStatuses, currentAppStatus)
}

err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
if err != nil {
return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err)
}

@@ -1161,7 +1205,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}

// check Applications that are in Waiting status and promote them to Pending if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()

appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))

@@ -1203,7 +1247,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if maxUpdate != nil {
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
if err != nil {
logCtx.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
log.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
}

// ensure that percentage values greater than 0% always result in at least 1 Application being selected
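
The maxUpdate handling above leans on `intstr.GetScaledValueFromIntOrPercent`, which accepts either an absolute count or a percentage. A small standalone sketch of the arithmetic (the values here are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// RollingSync step with maxUpdate set to a percentage and three Applications in the step.
	maxUpdate := intstr.FromString("50%")
	totalInStep := 3

	// false = round down, so 50% of 3 scales to 1.
	allowed, err := intstr.GetScaledValueFromIntOrPercent(&maxUpdate, totalInStep, false)
	if err != nil {
		panic(err)
	}
	// The controller bumps a 0 result up to 1 so any percentage above 0% still
	// lets at least one Application through in the step.
	if allowed == 0 {
		allowed = 1
	}
	fmt.Println(allowed) // 1
}
```
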

@@ -1213,13 +1257,13 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress

if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
}

}

if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
logCtx.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
appStatus.LastTransitionTime = &now
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."

@@ -1232,7 +1276,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
}
}

err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
if err != nil {
return nil, fmt.Errorf("failed to set AppSet app status: %w", err)
}

@@ -1294,7 +1338,7 @@ func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplica

// setApplicationSetApplicationStatus updates the ApplicatonSet's status field
// with any new/changed Application statuses.
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
needToUpdateStatus := false

if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) {

@@ -1328,7 +1372,7 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil {

logCtx.Errorf("unable to set application set status: %v", err)
log.Errorf("unable to set application set status: %v", err)
return fmt.Errorf("unable to set application set status: %v", err)
}

@@ -1343,7 +1387,7 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
return nil
}

func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
rolloutApps := []argov1alpha1.Application{}
for i := range validApps {
pruneEnabled := false

@@ -1363,7 +1407,7 @@ func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, appl

// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
}
rolloutApps = append(rolloutApps, validApps[i])

@@ -1407,51 +1451,29 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
CreateFunc: func(e event.CreateEvent) bool {
// if we are the owner and there is a create event, we most likely created it and do not need to
// re-reconcile
if log.IsLevelEnabled(log.DebugLevel) {
var appName string
app, isApp := e.Object.(*argov1alpha1.Application)
if isApp {
appName = app.QualifiedName()
}
log.WithField("app", appName).Debugln("received create event from owning an application")
}
log.Debugln("received create event from owning an application")
return false
},
DeleteFunc: func(e event.DeleteEvent) bool {
if log.IsLevelEnabled(log.DebugLevel) {
var appName string
app, isApp := e.Object.(*argov1alpha1.Application)
if isApp {
appName = app.QualifiedName()
}
log.WithField("app", appName).Debugln("received delete event from owning an application")
}
log.Debugln("received delete event from owning an application")
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
log.Debugln("received update event from owning an application")
appOld, isApp := e.ObjectOld.(*argov1alpha1.Application)
if !isApp {
return false
}
logCtx := log.WithField("app", appOld.QualifiedName())
logCtx.Debugln("received update event from owning an application")
appNew, isApp := e.ObjectNew.(*argov1alpha1.Application)
if !isApp {
return false
}
requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
log.Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
return requeue
},
GenericFunc: func(e event.GenericEvent) bool {
if log.IsLevelEnabled(log.DebugLevel) {
var appName string
app, isApp := e.Object.(*argov1alpha1.Application)
if isApp {
appName = app.QualifiedName()
}
log.WithField("app", appName).Debugln("received generic event from owning an application")
}
log.Debugln("received generic event from owning an application")
return true
},
}

@@ -12,6 +12,8 @@ import (
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -218,7 +220,7 @@ func TestExtractApplications(t *testing.T) {
Cache: &fakeCache{},
}

got, reason, err := r.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
got, reason, err := r.generateApplications(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",

@@ -331,7 +333,7 @@ func TestMergeTemplateApplications(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}

got, _, _ := r.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
got, _, _ := r.generateApplications(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",

@@ -979,296 +981,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
},
},
},
}, {
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "Ensure that ignored targetRevision difference doesn't cause an update, even if another field changes",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSetSpec{
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.source.targetRevision"}},
},
Template: v1alpha1.ApplicationSetTemplate{
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://git.example.com/test-org/test-repo.git",
TargetRevision: "foo",
},
},
},
},
},
existingApps: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "2",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://git.example.com/test-org/test-repo.git",
TargetRevision: "bar",
},
},
},
},
desiredApps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://git.example.com/test-org/test-repo.git",
// The targetRevision is ignored, so this should not be updated.
TargetRevision: "foo",
// This should be updated.
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "hi", Value: "there"},
},
},
},
},
},
},
expected: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "3",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://git.example.com/test-org/test-repo.git",
// This is the existing value from the cluster, which should not be updated because the field is ignored.
TargetRevision: "bar",
// This was missing on the cluster, so it should be added.
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "hi", Value: "there"},
},
},
},
},
},
},
}, {
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
name: "ignore parameters added to a multi-source app in the cluster",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSetSpec{
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
},
Template: v1alpha1.ApplicationSetTemplate{
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "foo: bar",
},
},
},
},
},
},
},
existingApps: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "2",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "foo: bar",
Parameters: []v1alpha1.HelmParameter{
{Name: "hi", Value: "there"},
},
},
},
},
},
},
},
desiredApps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "foo: bar",
},
},
},
},
},
},
expected: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
// This should not be updated, because reconciliation shouldn't modify the App.
ResourceVersion: "2",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "foo: bar",
Parameters: []v1alpha1.HelmParameter{
// This existed only in the cluster, but it shouldn't be removed, because the field is ignored.
{Name: "hi", Value: "there"},
},
},
},
},
},
},
},
}, {
name: "Demonstrate limitation of MergePatch", // Maybe we can fix this in Argo CD 3.0: https://github.com/argoproj/argo-cd/issues/15975
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSetSpec{
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
},
Template: v1alpha1.ApplicationSetTemplate{
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "new: values",
},
},
},
},
},
},
},
existingApps: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "2",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "foo: bar",
Parameters: []v1alpha1.HelmParameter{
{Name: "hi", Value: "there"},
},
},
},
},
},
},
},
desiredApps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "new: values",
},
},
},
},
},
},
expected: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "3",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
Sources: []v1alpha1.ApplicationSource{
{
RepoURL: "https://git.example.com/test-org/test-repo.git",
Helm: &v1alpha1.ApplicationSourceHelm{
Values: "new: values",
// The Parameters field got blown away, because the values field changed. MergePatch
// doesn't merge list items, it replaces the whole list if an item changes.
// If we eventually add a `name` field to Sources, we can use StrategicMergePatch.
},
},
},
},
},
},
} {
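
The "Demonstrate limitation of MergePatch" case above hinges on RFC 7386 merge-patch semantics: arrays are replaced wholesale, never merged element by element, which is why the cluster-only Helm parameters disappear once `values` changes inside the `sources` list. A standalone sketch of that behavior using the evanphx/json-patch library (illustrative only; not necessarily the exact code path Argo CD takes):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Simplified Application spec as it exists on the cluster: the Helm source has
	// both values and a cluster-only parameter.
	live := []byte(`{"sources":[{"repoURL":"https://git.example.com/test-org/test-repo.git","helm":{"values":"foo: bar","parameters":[{"name":"hi","value":"there"}]}}]}`)

	// Desired spec rendered from the ApplicationSet template: only values changed.
	desired := []byte(`{"sources":[{"repoURL":"https://git.example.com/test-org/test-repo.git","helm":{"values":"new: values"}}]}`)

	// RFC 7386: "sources" is an array, so the whole array is replaced by the desired
	// one and the parameters entry is lost.
	merged, err := jsonpatch.MergePatch(live, desired)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}
```
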
@@ -1282,7 +994,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
initObjs = append(initObjs, &a)
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -1291,8 +1003,8 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
Cache: &fakeCache{},
}

err = r.createOrUpdateInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
assert.NoError(t, err)
err = r.createOrUpdateInCluster(context.TODO(), c.appSet, c.desiredApps)
assert.Nil(t, err)

for _, obj := range c.expected {
got := &v1alpha1.Application{}
@@ -1302,6 +1014,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
}, got)

err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
assert.Nil(t, err)
assert.Equal(t, obj, *got)
}
})
@@ -1375,7 +1088,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {

initObjs := []crtclient.Object{&app, &appSet}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1537,7 +1250,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {

initObjs := []crtclient.Object{&app, &appSet}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1769,7 +1482,7 @@ func TestCreateApplications(t *testing.T) {
initObjs = append(initObjs, &a)
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -1778,7 +1491,7 @@ func TestCreateApplications(t *testing.T) {
Cache: &fakeCache{},
}

err = r.createInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.apps)
err = r.createInCluster(context.TODO(), c.appSet, c.apps)
assert.Nil(t, err)

for _, obj := range c.expected {
@@ -1913,7 +1626,7 @@ func TestDeleteInCluster(t *testing.T) {
initObjs = append(initObjs, &temp)
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -1922,7 +1635,7 @@ func TestDeleteInCluster(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}

err = r.deleteInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
err = r.deleteInCluster(context.TODO(), c.appSet, c.desiredApps)
assert.Nil(t, err)

// For each of the expected objects, verify they exist on the cluster
@@ -2287,15 +2000,7 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&project}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
goodCluster,
}}, nil)

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
@@ -2371,7 +2076,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -2441,7 +2146,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2611,7 +2316,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2812,7 +2517,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}

gotApp, _, _ := appSetReconciler.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
gotApp, _, _ := appSetReconciler.generateApplications(v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Generators: []v1alpha1.ApplicationSetGenerator{{
@@ -2922,7 +2627,7 @@ func TestPolicies(t *testing.T) {
},
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -3101,7 +2806,7 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
KubeClientset: kubeclientset,
}

err = r.setAppSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appStatuses)
err = r.setAppSetApplicationStatus(context.TODO(), &cc.appSet, cc.appStatuses)
assert.Nil(t, err)

assert.Equal(t, cc.expectedAppStatuses, cc.appSet.Status.ApplicationStatus)
@@ -3864,7 +3569,7 @@ func TestBuildAppDependencyList(t *testing.T) {
KubeClientset: kubeclientset,
}

appDependencyList, appStepMap, err := r.buildAppDependencyList(log.NewEntry(log.StandardLogger()), cc.appSet, cc.apps)
appDependencyList, appStepMap, err := r.buildAppDependencyList(context.TODO(), cc.appSet, cc.apps)
assert.Equal(t, err, nil, "expected no errors, but errors occured")
assert.Equal(t, cc.expectedList, appDependencyList, "expected appDependencyList did not match actual")
assert.Equal(t, cc.expectedStepMap, appStepMap, "expected appStepMap did not match actual")
@@ -5118,7 +4823,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
KubeClientset: kubeclientset,
}

appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, cc.appStepMap)
appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), &cc.appSet, cc.apps, cc.appStepMap)

// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {
@@ -5872,7 +5577,7 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
KubeClientset: kubeclientset,
}

appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap)
appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap)

// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {
@@ -6014,3 +5719,173 @@ func TestOwnsHandler(t *testing.T) {
})
}
}

func Test_applyIgnoreDifferences(t *testing.T) {
appMeta := metav1.TypeMeta{
APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
}
testCases := []struct {
name string
ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
foundApp string
generatedApp string
expectedApp string
}{
{
name: "empty ignoreDifferences",
foundApp: `
spec: {}`,
generatedApp: `
spec: {}`,
expectedApp: `
spec: {}`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "ignore target revision with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.source.targetRevision"}},
},
foundApp: `
spec:
source:
targetRevision: foo`,
generatedApp: `
spec:
source:
targetRevision: bar`,
expectedApp: `
spec:
source:
targetRevision: foo`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
name: "ignore helm parameter with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
},
foundApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: test
- name: another
value: value`,
generatedApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: v1.0.0
- name: another
value: value`,
expectedApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: test
- name: another
value: value`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "ignore auto-sync with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
},
foundApp: `
spec:
syncPolicy:
retry:
limit: 5`,
generatedApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
expectedApp: `
spec:
syncPolicy:
retry:
limit: 5`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
name: "ignore a one-off annotation with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
},
foundApp: `
metadata:
annotations:
foo.bar: baz
some.other: annotation`,
generatedApp: `
metadata:
annotations:
some.other: annotation`,
expectedApp: `
metadata:
annotations:
foo.bar: baz
some.other: annotation`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
name: "ignore the source.plugin field with a json pointer",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JSONPointers: []string{"/spec/source/plugin"}},
},
foundApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com`,
generatedApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com/wrong`,
expectedApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com`,
},
}

for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
foundApp := v1alpha1.Application{TypeMeta: appMeta}
err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
require.NoError(t, err, tc.foundApp)
generatedApp := v1alpha1.Application{TypeMeta: appMeta}
err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
require.NoError(t, err, tc.generatedApp)
err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, generatedApp)
require.NoError(t, err)
jsonFound, err := json.Marshal(tc.foundApp)
require.NoError(t, err)
jsonExpected, err := json.Marshal(tc.expectedApp)
require.NoError(t, err)
assert.Equal(t, string(jsonExpected), string(jsonFound))
})
}
}

@@ -60,9 +60,9 @@ func TestRequeueAfter(t *testing.T) {
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
"Git": generators.NewGitGenerator(mockServer),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}, true),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}, true),
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}),
}

nestedGenerators := map[string]generators.Generator{

@@ -148,9 +148,6 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
return nil, fmt.Errorf("unable to parse file: %v", err)
}
objectsFound = append(objectsFound, singleObj)
} else if len(objectsFound) == 0 {
// If file is valid but empty, add a default empty item
objectsFound = append(objectsFound, map[string]interface{}{})
}

res := []map[string]interface{}{}

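Aside: the test diff that follows pins down the two parameter shapes this generator produces. As an orientation only (shapes taken from the expected values in the tests below), a file containing "foo:\n  bar: baz" yields flat, dotted keys by default and nested maps when go templating is enabled:

// Flat keys (default templating):
flat := map[string]interface{}{"foo.bar": "baz", "path": "path/dir", "path.basename": "dir"}
// Nested maps (GoTemplate: true):
nested := map[string]interface{}{"foo": map[string]interface{}{"bar": "baz"}}
_, _ = flat, nested
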
@@ -4,173 +4,119 @@ import (
"fmt"
"testing"

"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func Test_generateParamsFromGitFile(t *testing.T) {
defaultContent := []byte(`
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`)
type args struct {
filePath string
fileContent []byte
values map[string]string
useGoTemplate bool
goTemplateOptions []string
pathParamPrefix string
`), values, false, nil, "")
if err != nil {
t.Fatal(err)
}
tests := []struct {
name string
args args
want []map[string]interface{}
wantErr bool
}{
assert.Equal(t, []map[string]interface{}{
{
name: "empty file returns path parameters",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: []byte(""),
values: map[string]string{},
useGoTemplate: false,
},
want: []map[string]interface{}{
{
"path": "path/dir",
"path.basename": "dir",
"path.filename": "file_name.yaml",
"path.basenameNormalized": "dir",
"path.filenameNormalized": "file-name.yaml",
"path[0]": "path",
"path[1]": "dir",
},
},
},
{
name: "invalid json/yaml file returns error",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: []byte("this is not json or yaml"),
values: map[string]string{},
useGoTemplate: false,
},
wantErr: true,
},
{
name: "file parameters are added to params",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: false,
},
want: []map[string]interface{}{
{
"foo.bar": "baz",
"path": "path/dir",
"path.basename": "dir",
"path.filename": "file_name.yaml",
"path.basenameNormalized": "dir",
"path.filenameNormalized": "file-name.yaml",
"path[0]": "path",
"path[1]": "dir",
},
},
},
{
name: "path parameter are prefixed",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: false,
pathParamPrefix: "myRepo",
},
want: []map[string]interface{}{
{
"foo.bar": "baz",
"myRepo.path": "path/dir",
"myRepo.path.basename": "dir",
"myRepo.path.filename": "file_name.yaml",
"myRepo.path.basenameNormalized": "dir",
"myRepo.path.filenameNormalized": "file-name.yaml",
"myRepo.path[0]": "path",
"myRepo.path[1]": "dir",
},
},
},
{
name: "file parameters are added to params with go template",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: true,
},
want: []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
},
{
name: "path parameter are prefixed with go template",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: true,
pathParamPrefix: "myRepo",
},
want: []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
},
},
"foo.bar": "baz",
"path": "path/dir",
"path.basename": "dir",
"path.filename": "file_name.yaml",
"path.basenameNormalized": "dir",
"path.filenameNormalized": "file-name.yaml",
"path[0]": "path",
"path[1]": "dir",
},
}, params)
}

func Test_generatePrefixedParamsFromGitFile(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, false, nil, "myRepo")
if err != nil {
t.Fatal(err)
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
params, err := (*GitGenerator)(nil).generateParamsFromGitFile(tt.args.filePath, tt.args.fileContent, tt.args.values, tt.args.useGoTemplate, tt.args.goTemplateOptions, tt.args.pathParamPrefix)
if (err != nil) != tt.wantErr {
t.Errorf("GitGenerator.generateParamsFromGitFile() error = %v, wantErr %v", err, tt.wantErr)
return
}
assert.Equal(t, tt.want, params)
})
assert.Equal(t, []map[string]interface{}{
{
"foo.bar": "baz",
"myRepo.path": "path/dir",
"myRepo.path.basename": "dir",
"myRepo.path.filename": "file_name.yaml",
"myRepo.path.basenameNormalized": "dir",
"myRepo.path.filenameNormalized": "file-name.yaml",
"myRepo.path[0]": "path",
"myRepo.path[1]": "dir",
},
}, params)
}

func Test_generateParamsFromGitFileGoTemplate(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, true, nil, "")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
}, params)
}

func Test_generatePrefixedParamsFromGitFileGoTemplate(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, true, nil, "myRepo")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
}, params)
}

func TestGitGenerateParamsFromDirectories(t *testing.T) {

@@ -27,16 +27,14 @@ type PullRequestGenerator struct {
auth SCMAuthProviders
scmRootCAPath string
allowedSCMProviders []string
enableSCMProviders bool
}

func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string, enableSCMProviders bool) Generator {
func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string) Generator {
g := &PullRequestGenerator{
client: client,
auth: auth,
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedScmProviders,
enableSCMProviders: enableSCMProviders,
}
g.selectServiceProviderFunc = g.selectServiceProvider
return g
@@ -68,7 +66,7 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
ctx := context.Background()
svc, err := g.selectServiceProviderFunc(ctx, appSetGenerator.PullRequest, applicationSetInfo)
if err != nil {
return nil, fmt.Errorf("failed to select pull request service provider: %w", err)
return nil, fmt.Errorf("failed to select pull request service provider: %v", err)
}

pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
@@ -123,18 +121,17 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha

// selectServiceProvider selects the provider to get pull requests from the configuration
func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, generatorConfig *argoprojiov1alpha1.PullRequestGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
if !g.enableSCMProviders {
return nil, ErrSCMProvidersDisabled
}
if err := ScmProviderAllowed(applicationSetInfo, generatorConfig, g.allowedSCMProviders); err != nil {
return nil, fmt.Errorf("scm provider not allowed: %w", err)
}

if generatorConfig.Github != nil {
if !ScmProviderAllowed(applicationSetInfo, generatorConfig.Github.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Github.API)
}
return g.github(ctx, generatorConfig.Github, applicationSetInfo)
}
if generatorConfig.GitLab != nil {
providerConfig := generatorConfig.GitLab
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %v", err)
@@ -143,6 +140,9 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.Gitea != nil {
providerConfig := generatorConfig.Gitea
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Gitea.API)
}
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %v", err)
@@ -151,6 +151,9 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.BitbucketServer != nil {
providerConfig := generatorConfig.BitbucketServer
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
if providerConfig.BasicAuth != nil {
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {

@@ -278,7 +278,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
cases := []struct {
name string
providerConfig *argoprojiov1alpha1.PullRequestGenerator
expectedError error
expectedError string
}{
{
name: "Error Github",
@@ -287,7 +287,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitlab",
@@ -296,7 +296,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitea",
@@ -305,7 +305,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Bitbucket",
@@ -314,7 +314,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
}

@@ -330,7 +330,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
}, true)
})

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -346,29 +346,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
_, err := pullRequestGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)

assert.Error(t, err, "Must return an error")
assert.ErrorAs(t, err, testCaseCopy.expectedError)
assert.Equal(t, testCaseCopy.expectedError, err.Error())
})
}
}

func TestSCMProviderDisabled_PRGenerator(t *testing.T) {
generator := NewPullRequestGenerator(nil, SCMAuthProviders{}, "", []string{}, false)

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{
Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
}},
},
}

_, err := generator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
assert.ErrorIs(t, err, ErrSCMProvidersDisabled)
}

@@ -2,7 +2,6 @@ package generators

import (
"context"
"errors"
"fmt"
"strings"
"time"
@@ -32,26 +31,24 @@ type SCMProviderGenerator struct {
SCMAuthProviders
scmRootCAPath string
allowedSCMProviders []string
enableSCMProviders bool
}

type SCMAuthProviders struct {
GitHubApps github_app_auth.Credentials
}

func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool) Generator {
func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string) Generator {
return &SCMProviderGenerator{
client: client,
SCMAuthProviders: providers,
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedSCMProviders,
enableSCMProviders: enableSCMProviders,
}
}

// Testing generator
func NewTestSCMProviderGenerator(overrideProvider scm_provider.SCMProviderService) Generator {
return &SCMProviderGenerator{overrideProvider: overrideProvider, enableSCMProviders: true}
return &SCMProviderGenerator{overrideProvider: overrideProvider}
}

func (g *SCMProviderGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
@@ -68,34 +65,14 @@ func (g *SCMProviderGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.A
return &appSetGenerator.SCMProvider.Template
}

var ErrSCMProvidersDisabled = errors.New("scm providers are disabled")

type ErrDisallowedSCMProvider struct {
Provider string
Allowed []string
}

func NewErrDisallowedSCMProvider(provider string, allowed []string) ErrDisallowedSCMProvider {
return ErrDisallowedSCMProvider{
Provider: provider,
Allowed: allowed,
}
}

func (e ErrDisallowedSCMProvider) Error() string {
return fmt.Sprintf("scm provider %q not allowed, must use one of the following: %s", e.Provider, strings.Join(e.Allowed, ", "))
}

func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, generator SCMGeneratorWithCustomApiUrl, allowedScmProviders []string) error {
url := generator.CustomApiUrl()

func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, url string, allowedScmProviders []string) bool {
if url == "" || len(allowedScmProviders) == 0 {
return nil
return true
}

for _, allowedScmProvider := range allowedScmProviders {
if url == allowedScmProvider {
return nil
return true
}
}

@@ -103,9 +80,9 @@ func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, g
common.SecurityField: common.SecurityMedium,
"applicationset": applicationSetInfo.Name,
"appSetNamespace": applicationSetInfo.Namespace,
}).Debugf("attempted to use disallowed SCM %q, must use one of the following: %s", url, strings.Join(allowedScmProviders, ", "))
}).Debugf("attempted to use disallowed SCM %q", url)

return NewErrDisallowedSCMProvider(url, allowedScmProviders)
return false
}

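Aside: an illustrative reading of the allow-list semantics kept by this change (the bool-returning ScmProviderAllowed above); the calls below are examples, not lines from the diff:

allowed := []string{"github.myorg.com"}
_ = ScmProviderAllowed(&argoprojiov1alpha1.ApplicationSet{}, "github.myorg.com", allowed)          // true: exact match
_ = ScmProviderAllowed(&argoprojiov1alpha1.ApplicationSet{}, "https://other.example.com", allowed) // false: not in the allow-list
_ = ScmProviderAllowed(&argoprojiov1alpha1.ApplicationSet{}, "", nil)                              // true: an empty URL or an empty allow-list always passes
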
func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
@@ -117,28 +94,26 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, EmptyAppSetGeneratorError
}

if !g.enableSCMProviders {
return nil, ErrSCMProvidersDisabled
}
ctx := context.Background()

// Create the SCM provider helper.
providerConfig := appSetGenerator.SCMProvider

if err := ScmProviderAllowed(applicationSetInfo, providerConfig, g.allowedSCMProviders); err != nil {
return nil, fmt.Errorf("scm provider not allowed: %w", err)
}

ctx := context.Background()
var provider scm_provider.SCMProviderService
if g.overrideProvider != nil {
provider = g.overrideProvider
} else if providerConfig.Github != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Github.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Github.API)
}
var err error
provider, err = g.githubProvider(ctx, providerConfig.Github, applicationSetInfo)
if err != nil {
return nil, fmt.Errorf("scm provider: %w", err)
}
} else if providerConfig.Gitlab != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitlab.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitlab.API)
}
token, err := g.getSecretRef(ctx, providerConfig.Gitlab.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitlab token: %v", err)
@@ -148,6 +123,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Gitlab service: %v", err)
}
} else if providerConfig.Gitea != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitea.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitea.API)
}
token, err := g.getSecretRef(ctx, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitea token: %v", err)
@@ -158,6 +136,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
}
} else if providerConfig.BitbucketServer != nil {
providerConfig := providerConfig.BitbucketServer
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
var scmError error
if providerConfig.BasicAuth != nil {
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
@@ -172,6 +153,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Bitbucket Server service: %v", scmError)
}
} else if providerConfig.AzureDevOps != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.AzureDevOps.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.AzureDevOps.API)
}
token, err := g.getSecretRef(ctx, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Azure Devops access token: %v", err)

@@ -174,7 +174,7 @@ func TestSCMProviderGenerateParams(t *testing.T) {
mockProvider := &scm_provider.MockProvider{
Repos: testCaseCopy.repos,
}
scmGenerator := &SCMProviderGenerator{overrideProvider: mockProvider, enableSCMProviders: true}
scmGenerator := &SCMProviderGenerator{overrideProvider: mockProvider}
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -205,7 +205,7 @@ func TestAllowedSCMProvider(t *testing.T) {
cases := []struct {
name string
providerConfig *argoprojiov1alpha1.SCMProviderGenerator
expectedError error
expectedError string
}{
{
name: "Error Github",
@@ -214,7 +214,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitlab",
@@ -223,7 +223,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitea",
@@ -232,7 +232,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Bitbucket",
@@ -241,7 +241,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error AzureDevops",
@@ -250,7 +250,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
}

@@ -260,16 +260,13 @@ func TestAllowedSCMProvider(t *testing.T) {
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()

scmGenerator := &SCMProviderGenerator{
allowedSCMProviders: []string{
"github.myorg.com",
"gitlab.myorg.com",
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
},
enableSCMProviders: true,
}
scmGenerator := &SCMProviderGenerator{allowedSCMProviders: []string{
"github.myorg.com",
"gitlab.myorg.com",
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
}}

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -285,29 +282,7 @@ func TestAllowedSCMProvider(t *testing.T) {
_, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)

assert.Error(t, err, "Must return an error")
assert.ErrorAs(t, err, testCaseCopy.expectedError)
assert.Equal(t, testCaseCopy.expectedError, err.Error())
})
}
}

func TestSCMProviderDisabled_SCMGenerator(t *testing.T) {
generator := &SCMProviderGenerator{enableSCMProviders: false}

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
SCMProvider: &argoprojiov1alpha1.SCMProviderGenerator{
Github: &argoprojiov1alpha1.SCMProviderGeneratorGithub{
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
}},
},
}

_, err := generator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
assert.ErrorIs(t, err, ErrSCMProvidersDisabled)
}

@@ -1,5 +0,0 @@
package generators

type SCMGeneratorWithCustomApiUrl interface {
CustomApiUrl() string
}
@@ -180,7 +180,7 @@ func secretToCluster(s *corev1.Secret) (*appv1.Cluster, error) {
if val, err := strconv.Atoi(string(shardStr)); err != nil {
log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err)
} else {
shard = pointer.Int64(int64(val))
shard = pointer.Int64Ptr(int64(val))
}
}
cluster := appv1.Cluster{

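Aside: assuming the k8s.io/utils/pointer package used here, pointer.Int64 and the older pointer.Int64Ptr are equivalent helpers returning a *int64, so this hunk only swaps one spelling for the other:

shard := pointer.Int64(int64(3)) // *int64 pointing at the value 3
_ = shard
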
@@ -2,24 +2,19 @@ package utils

import (
"context"
"encoding/json"
"fmt"

log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo"
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
)

// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
@@ -35,7 +30,7 @@ import (
// The MutateFn is called regardless of creating or updating an object.
//
// It returns the executed operation and an error.
func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {

key := client.ObjectKeyFromObject(obj)
if err := c.Get(ctx, key, obj); err != nil {
@@ -51,24 +46,15 @@ func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ign
return controllerutil.OperationResultCreated, nil
}

normalizedLive := obj.DeepCopy()

// Mutate the live object to match the desired state.
existingObj := obj.DeepCopyObject()
existing, ok := existingObj.(client.Object)
if !ok {
panic(fmt.Errorf("existing object is not a client.Object"))
}
if err := mutate(f, key, obj); err != nil {
return controllerutil.OperationResultNone, err
}

// Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
// prevents those differences from appearing in the diff and therefore in the patch.
err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj)
if err != nil {
return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
}

// Normalize to avoid diffing on unimportant differences.
normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
obj.Spec = *argo.NormalizeApplicationSpec(&obj.Spec)

equality := conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
@@ -93,35 +79,23 @@ func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ign
return a.Namespace == b.Namespace && a.Name == b.Name && a.Server == b.Server
},
)
// make sure updated object has the same apiVersion & kind as original object
if objKind, ok := obj.(schema.ObjectKind); ok {
if existingKind, ok := existing.(schema.ObjectKind); ok {
existingKind.SetGroupVersionKind(objKind.GroupVersionKind())
}
}

if equality.DeepEqual(normalizedLive, obj) {
if equality.DeepEqual(existing, obj) {
return controllerutil.OperationResultNone, nil
}

patch := client.MergeFrom(normalizedLive)
if log.IsLevelEnabled(log.DebugLevel) {
LogPatch(logCtx, patch, obj)
}
if err := c.Patch(ctx, obj, patch); err != nil {
if err := c.Patch(ctx, obj, client.MergeFrom(existing)); err != nil {
return controllerutil.OperationResultNone, err
}
return controllerutil.OperationResultUpdated, nil
}

func LogPatch(logCtx *log.Entry, patch client.Patch, obj *argov1alpha1.Application) {
patchBytes, err := patch.Data(obj)
if err != nil {
logCtx.Errorf("failed to generate patch: %v", err)
}
// Get the patch as a plain object so it is easier to work with in json logs.
var patchObj map[string]interface{}
err = json.Unmarshal(patchBytes, &patchObj)
if err != nil {
logCtx.Errorf("failed to unmarshal patch: %v", err)
}
logCtx.WithField("patch", patchObj).Debug("patching application")
}

// mutate wraps a MutateFn and applies validation to its result
func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object) error {
if err := f(); err != nil {
@@ -132,71 +106,3 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
}
return nil
}

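Aside: a minimal usage sketch of the CreateOrUpdate variant above that takes a log entry and ignore-differences rules; the names r, desired, ignoreDiffs and logCtx are illustrative and not taken from this diff:

found := &argov1alpha1.Application{ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace}}
op, err := CreateOrUpdate(ctx, logCtx, r.Client, ignoreDiffs, found, func() error {
	// MutateFn: copy the generated spec onto whatever was fetched (or is about to be created).
	found.Spec = desired.Spec
	return nil
})
if err == nil {
	logCtx.Infof("application %s %s", desired.Name, op)
}
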
// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application) error {
if len(applicationSetIgnoreDifferences) == 0 {
return nil
}

generatedAppCopy := generatedApp.DeepCopy()
diffConfig, err := argodiff.NewDiffConfigBuilder().
WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
WithNoCache().
Build()
if err != nil {
return fmt.Errorf("failed to build diff config: %w", err)
}
unstructuredFound, err := appToUnstructured(found)
if err != nil {
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
}
unstructuredGenerated, err := appToUnstructured(generatedApp)
if err != nil {
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
}
result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
if err != nil {
return fmt.Errorf("failed to normalize application spec: %w", err)
}
if len(result.Lives) != 1 {
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Lives))
}
foundJsonNormalized, err := json.Marshal(result.Lives[0].Object)
if err != nil {
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
}
foundNormalized := &argov1alpha1.Application{}
err = json.Unmarshal(foundJsonNormalized, &foundNormalized)
if err != nil {
return fmt.Errorf("failed to unmarshal normalized app to json: %w", err)
}
if len(result.Targets) != 1 {
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
}
foundNormalized.DeepCopyInto(found)
generatedJsonNormalized, err := json.Marshal(result.Targets[0].Object)
if err != nil {
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
}
generatedAppNormalized := &argov1alpha1.Application{}
err = json.Unmarshal(generatedJsonNormalized, &generatedAppNormalized)
if err != nil {
return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
}
generatedAppNormalized.DeepCopyInto(generatedApp)
// Prohibit jq queries from mutating silly things.
generatedApp.TypeMeta = generatedAppCopy.TypeMeta
generatedApp.Name = generatedAppCopy.Name
generatedApp.Namespace = generatedAppCopy.Namespace
generatedApp.Operation = generatedAppCopy.Operation
return nil
}

func appToUnstructured(app client.Object) (*unstructured.Unstructured, error) {
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
if err != nil {
return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
}
return &unstructured.Unstructured{Object: u}, nil
}

@@ -1,234 +0,0 @@
package utils

import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func Test_applyIgnoreDifferences(t *testing.T) {
appMeta := metav1.TypeMeta{
APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
}
testCases := []struct {
name string
ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
foundApp string
generatedApp string
expectedApp string
}{
{
name: "empty ignoreDifferences",
foundApp: `
spec: {}`,
generatedApp: `
spec: {}`,
expectedApp: `
spec: {}`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "ignore target revision with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.source.targetRevision"}},
},
foundApp: `
spec:
source:
targetRevision: foo`,
generatedApp: `
spec:
source:
targetRevision: bar`,
expectedApp: `
spec:
source:
targetRevision: foo`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
name: "ignore helm parameter with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
},
foundApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: test
- name: another
value: value`,
generatedApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: v1.0.0
- name: another
value: value`,
expectedApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: test
- name: another
value: value`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "ignore auto-sync in appset when it's not in the cluster with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
},
foundApp: `
spec:
syncPolicy:
retry:
limit: 5`,
generatedApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
expectedApp: `
spec:
syncPolicy:
retry:
limit: 5`,
},
{
name: "ignore auto-sync in the cluster when it's not in the appset with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
},
foundApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
generatedApp: `
spec:
syncPolicy:
retry:
limit: 5`,
expectedApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
name: "ignore a one-off annotation with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
},
foundApp: `
metadata:
annotations:
foo.bar: baz
some.other: annotation`,
generatedApp: `
metadata:
annotations:
some.other: annotation`,
expectedApp: `
metadata:
annotations:
foo.bar: baz
some.other: annotation`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
name: "ignore the source.plugin field with a json pointer",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JSONPointers: []string{"/spec/source/plugin"}},
},
foundApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com`,
generatedApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com/wrong`,
expectedApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
name: "ignore parameters added to a multi-source app in the cluster",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
},
foundApp: `
spec:
sources:
- repoURL: https://git.example.com/test-org/test-repo
helm:
parameters:
- name: test
value: hi`,
generatedApp: `
spec:
sources:
- repoURL: https://git.example.com/test-org/test-repo`,
expectedApp: `
spec:
sources:
- repoURL: https://git.example.com/test-org/test-repo
helm:
parameters:
- name: test
value: hi`,
},
}

for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
foundApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
|
||||
require.NoError(t, err, tc.foundApp)
|
||||
generatedApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
|
||||
require.NoError(t, err, tc.generatedApp)
|
||||
err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp)
|
||||
require.NoError(t, err)
|
||||
yamlFound, err := yaml.Marshal(tc.foundApp)
|
||||
require.NoError(t, err)
|
||||
yamlExpected, err := yaml.Marshal(tc.expectedApp)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(yamlExpected), string(yamlFound))
|
||||
})
|
||||
}
|
||||
}
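The table above exercises the package-internal applyIgnoreDifferences helper with JQ path expressions and JSON pointers. A minimal sketch of building and applying the same kind of ignore rule directly, using only the type and helper names that appear in these tests (the surrounding variables are illustrative):

	// Ignore the automated sync policy when comparing the Application found
	// in the cluster against the one generated from the ApplicationSet.
	ignore := v1alpha1.ApplicationSetIgnoreDifferences{
		{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
	}
	// found and generated would be unmarshalled Applications, as in the test loop above.
	if err := applyIgnoreDifferences(ignore, &found, &generated); err != nil {
		t.Fatalf("applyIgnoreDifferences: %v", err)
	}
	// found keeps its own value (or absence) for the ignored field, so a later
	// comparison against generated does not report a difference there.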
@@ -1,71 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// SanitizeName sanitizes the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func SanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
|
||||
// toYAML takes an interface, marshals it to yaml, and returns a string. It will
|
||||
// always return a string, even on marshal error (empty string).
|
||||
//
|
||||
// This is designed to be called from a template.
|
||||
func toYAML(v interface{}) (string, error) {
|
||||
data, err := yaml.Marshal(v)
|
||||
if err != nil {
|
||||
// Swallow errors inside of a template.
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSuffix(string(data), "\n"), nil
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
|
||||
// fromYAML converts a YAML document into a map[string]interface{}.
|
||||
//
|
||||
// This is not a general-purpose YAML parser, and will not parse all valid
|
||||
// YAML documents. Additionally, because its intended use is within templates
|
||||
// it tolerates errors. It will insert the returned error message string into
|
||||
// m["Error"] in the returned map.
|
||||
func fromYAML(str string) (map[string]interface{}, error) {
|
||||
m := map[string]interface{}{}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(str), &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
|
||||
// fromYAMLArray converts a YAML array into a []interface{}.
|
||||
//
|
||||
// This is not a general-purpose YAML parser, and will not parse all valid
|
||||
// YAML documents. Additionally, because its intended use is within templates
|
||||
// it tolerates errors. It will insert the returned error message string as
|
||||
// the first and only item in the returned array.
|
||||
func fromYAMLArray(str string) ([]interface{}, error) {
|
||||
a := []interface{}{}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(str), &a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
@@ -32,9 +32,6 @@ func init() {
|
||||
delete(sprigFuncMap, "expandenv")
|
||||
delete(sprigFuncMap, "getHostByName")
|
||||
sprigFuncMap["normalize"] = SanitizeName
|
||||
sprigFuncMap["toYaml"] = toYAML
|
||||
sprigFuncMap["fromYaml"] = fromYAML
|
||||
sprigFuncMap["fromYamlArray"] = fromYAMLArray
|
||||
}
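The init above exposes SanitizeName to Go templates under the name normalize. A small sketch of what that enables, assuming sprigFuncMap is the template.FuncMap built here (imports "bytes" and "text/template" implied):

	tmpl := template.Must(template.New("name").Funcs(sprigFuncMap).Parse(`{{ .cluster | normalize }}`))
	var out bytes.Buffer
	_ = tmpl.Execute(&out, map[string]interface{}{"cluster": "My_Cluster"})
	// out.String() == "my-cluster": uppercase letters are lowered and '_' becomes '-'.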
|
||||
type Renderer interface {
|
||||
@@ -434,6 +431,23 @@ func NormalizeBitbucketBasePath(basePath string) string {
|
||||
return basePath
|
||||
}
|
||||
|
||||
// SanitizeName sanitizes the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func SanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
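A testable example of the rules documented above (illustrative; assumes it sits in the same package as SanitizeName and imports "fmt"):

	func ExampleSanitizeName() {
		// '_' and '!' are not in [-a-z0-9.], so they become '-'; the trailing
		// '-' is then trimmed away.
		fmt.Println(SanitizeName("My_App.Example!"))
		// Output: my-app.example
	}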
|
||||
func getTlsConfigWithCACert(scmRootCAPath string) *tls.Config {
|
||||
|
||||
tlsConfig := &tls.Config{}
|
||||
|
||||
@@ -555,64 +555,6 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
templateOptions: []string{"missingkey=error"},
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
},
|
||||
{
|
||||
name: "toYaml",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
params: map[string]interface{}{
|
||||
"foo": map[string]interface{}{
|
||||
"bar": map[string]interface{}{
|
||||
"bool": true,
|
||||
"number": 2,
|
||||
"str": "Hello world",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "toYaml Error",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: :1:3: executing \"\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
params: map[string]interface{}{
|
||||
"foo": func(test *string) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYaml",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
params: map[string]interface{}{
|
||||
"value": "hello: world",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYaml error",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
params: map[string]interface{}{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYamlArray",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
params: map[string]interface{}{
|
||||
"value": "- hello world\n- bonjour tout le monde",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYamlArray error",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
params: map[string]interface{}{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
@@ -234,7 +234,7 @@
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "forces application reconciliation if set to 'hard'.",
|
||||
"description": "forces application reconciliation if set to true.",
|
||||
"name": "refresh",
|
||||
"in": "query"
|
||||
},
|
||||
@@ -573,7 +573,7 @@
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "forces application reconciliation if set to 'hard'.",
|
||||
"description": "forces application reconciliation if set to true.",
|
||||
"name": "refresh",
|
||||
"in": "query"
|
||||
},
|
||||
@@ -3816,7 +3816,7 @@
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "forces application reconciliation if set to 'hard'.",
|
||||
"description": "forces application reconciliation if set to true.",
|
||||
"name": "refresh",
|
||||
"in": "query"
|
||||
},
|
||||
@@ -4462,9 +4462,6 @@
|
||||
"clientID": {
|
||||
"type": "string"
|
||||
},
|
||||
"enablePKCEAuthentication": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"idTokenClaims": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
@@ -5092,7 +5089,7 @@
|
||||
}
|
||||
},
|
||||
"runtimeRawExtension": {
|
||||
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
|
||||
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this:\n{\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"raw": {
|
||||
@@ -5499,6 +5496,10 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"clusterName": {
|
||||
"description": "Deprecated: ClusterName is a legacy field that was always cleared by\nthe system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect\naccidental use.\n\n+optional",
|
||||
"type": "string"
|
||||
},
|
||||
"creationTimestamp": {
|
||||
"$ref": "#/definitions/v1Time"
|
||||
},
|
||||
@@ -5570,8 +5571,8 @@
|
||||
}
|
||||
},
|
||||
"v1ObjectReference": {
|
||||
"description": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
|
||||
"type": "object",
|
||||
"title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"type": "string",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
|
||||
"github.com/argoproj/pkg/stats"
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -46,7 +45,6 @@ const (
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
workqueueRateLimit ratelimiter.AppControllerRateLimiterConfig
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
@@ -143,6 +141,7 @@ func NewCommand() *cobra.Command {
|
||||
}))
|
||||
kubectl := kubeutil.NewKubectl()
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
|
||||
errors.CheckError(err)
|
||||
appController, err = controller.NewApplicationController(
|
||||
namespace,
|
||||
settingsMgr,
|
||||
@@ -161,7 +160,6 @@ func NewCommand() *cobra.Command {
|
||||
persistResourceHealth,
|
||||
clusterFilter,
|
||||
applicationNamespaces,
|
||||
&workqueueRateLimit,
|
||||
)
|
||||
errors.CheckError(err)
|
||||
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
|
||||
@@ -207,15 +205,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from")
|
||||
command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD")
|
||||
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
|
||||
// global queue rate limit config
|
||||
command.Flags().Int64Var(&workqueueRateLimit.BucketSize, "wq-bucket-size", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_SIZE", 500, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket Size, default 500")
|
||||
command.Flags().Int64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_QPS", 50, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket QPS, default 50")
|
||||
// individual item rate limit config
|
||||
// when WORKQUEUE_FAILURE_COOLDOWN is 0 per item rate limiting is disabled(default)
|
||||
command.Flags().DurationVar(&workqueueRateLimit.FailureCoolDown, "wq-cooldown-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_FAILURE_COOLDOWN_NS", 0, 0, (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0(per item rate limiter disabled)")
|
||||
command.Flags().DurationVar(&workqueueRateLimit.BaseDelay, "wq-basedelay-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_BASE_DELAY_NS", time.Millisecond.Nanoseconds(), time.Nanosecond.Nanoseconds(), (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Base Delay duration in nanoseconds, default 1000000 (1ms)")
|
||||
command.Flags().DurationVar(&workqueueRateLimit.MaxDelay, "wq-maxdelay-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_MAX_DELAY_NS", time.Second.Nanoseconds(), 1*time.Millisecond.Nanoseconds(), (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Max Delay duration in nanoseconds, default 1000000000 (1s)")
|
||||
command.Flags().Float64Var(&workqueueRateLimit.BackoffFactor, "wq-backoff-factor", env.ParseFloat64FromEnv("WORKQUEUE_BACKOFF_FACTOR", 1.5, 0, math.MaxFloat64), "Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5")
|
||||
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
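The workqueue flags above populate ratelimiter.AppControllerRateLimiterConfig (BucketSize, BucketQPS, FailureCoolDown, BaseDelay, MaxDelay, BackoffFactor). A rough sketch of how such a config can be composed into a client-go rate limiter; this is not the actual ratelimiter package, and it ignores BackoffFactor, which the real implementation presumably applies to the per-item backoff (imports golang.org/x/time/rate and k8s.io/client-go/util/workqueue):

	func newLimiter(cfg ratelimiter.AppControllerRateLimiterConfig) workqueue.RateLimiter {
		limiters := []workqueue.RateLimiter{
			// Global token bucket: BucketQPS tokens per second, burst of BucketSize.
			&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(cfg.BucketQPS), int(cfg.BucketSize))},
		}
		if cfg.FailureCoolDown > 0 {
			// Per-item exponential backoff between BaseDelay and MaxDelay,
			// enabled only when a failure cooldown is configured.
			limiters = append(limiters, workqueue.NewItemExponentialFailureRateLimiter(cfg.BaseDelay, cfg.MaxDelay))
		}
		return workqueue.NewMaxOfRateLimiter(limiters...)
	}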
|
||||
@@ -65,7 +65,6 @@ func NewCommand() *cobra.Command {
|
||||
allowedScmProviders []string
|
||||
globalPreservedAnnotations []string
|
||||
globalPreservedLabels []string
|
||||
enableScmProviders bool
|
||||
)
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
@@ -108,8 +107,8 @@ func NewCommand() *cobra.Command {
|
||||
// If the applicationset-namespaces contains only one namespace it corresponds to the current namespace
|
||||
if len(applicationSetNamespaces) == 1 {
|
||||
watchedNamespace = (applicationSetNamespaces)[0]
|
||||
} else if enableScmProviders && len(allowedScmProviders) == 0 {
|
||||
log.Error("When enabling applicationset in any namespace using applicationset-namespaces, you must either set --enable-scm-providers=false or specify --allowed-scm-providers")
|
||||
} else if len(allowedScmProviders) == 0 {
|
||||
log.Error("When enabling applicationset in any namespace using applicationset-namespaces, allowed-scm-providers is required")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -163,9 +162,9 @@ func NewCommand() *cobra.Command {
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(mgr.GetClient(), ctx, k8sClient, namespace),
|
||||
"Git": generators.NewGitGenerator(argoCDService),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders, enableScmProviders),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
|
||||
"PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders, enableScmProviders),
|
||||
"PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders),
|
||||
"Plugin": generators.NewPluginGenerator(mgr.GetClient(), ctx, k8sClient, namespace),
|
||||
}
|
||||
|
||||
@@ -248,8 +247,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&debugLog, "debug", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DEBUG", false), "Print debug logs. Takes precedence over loglevel")
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed custom SCM provider API URLs. This restriction does not apply to SCM or PR generators which do not accept a custom API URL. (Default: Empty = all)")
|
||||
command.Flags().BoolVar(&enableScmProviders, "enable-scm-providers", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS", true), "Enable retrieving information from SCM providers, used by the SCM and PR generators (Default: true)")
|
||||
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed scm providers. (Default: Empty = all)")
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
|
||||
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
|
||||
command.Flags().BoolVar(&enableNewGitFileGlobbing, "enable-new-git-file-globbing", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING", false), "Enable new globbing in Git files generator.")
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/kube"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
"github.com/argoproj/argo-cd/v2/util/tls"
|
||||
traceutil "github.com/argoproj/argo-cd/v2/util/trace"
|
||||
)
|
||||
@@ -212,13 +211,6 @@ func NewCommand() *cobra.Command {
|
||||
}
|
||||
}
|
||||
},
|
||||
Example: templates.Examples(`
|
||||
# Start the Argo CD API server with default settings
|
||||
$ argocd-server
|
||||
|
||||
# Start the Argo CD API server on a custom port and enable tracing
|
||||
$ argocd-server --port 8888 --otlp-address localhost:4317
|
||||
`),
|
||||
}
|
||||
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(command)
|
||||
|
||||
@@ -26,26 +26,12 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/io"
|
||||
"github.com/argoproj/argo-cd/v2/util/localconfig"
|
||||
sessionutil "github.com/argoproj/argo-cd/v2/util/session"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
)
|
||||
|
||||
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "account",
|
||||
Short: "Manage account settings",
|
||||
Example: templates.Examples(`
|
||||
# List accounts
|
||||
argocd account list
|
||||
|
||||
# Update the current user's password
|
||||
argocd account update-password
|
||||
|
||||
# Can I sync any app?
|
||||
argocd account can-i sync applications '*'
|
||||
|
||||
# Get User information
|
||||
argocd account get-user-info
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -157,13 +143,6 @@ func NewAccountGetUserInfoCommand(clientOpts *argocdclient.ClientOptions) *cobra
|
||||
var command = &cobra.Command{
|
||||
Use: "get-user-info",
|
||||
Short: "Get user info",
|
||||
Example: templates.Examples(`
|
||||
# Get User information for the currently logged-in user (see 'argocd login')
|
||||
argocd account get-user-info
|
||||
|
||||
# Get User information in yaml format
|
||||
argocd account get-user-info -o yaml
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -48,87 +48,6 @@ func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
},
|
||||
Example: `# List all clusters
|
||||
$ argocd admin cluster list
|
||||
|
||||
# Add a new cluster
|
||||
$ argocd admin cluster add my-cluster --name my-cluster --in-cluster-context
|
||||
|
||||
# Remove a cluster
|
||||
argocd admin cluster remove my-cluster
|
||||
|
||||
# List all projects
|
||||
$ argocd admin project list
|
||||
|
||||
# Create a new project
|
||||
$ argocd admin project create my-project --src-namespace my-source-namespace --dest-namespace my-dest-namespace
|
||||
|
||||
# Update a project
|
||||
$ argocd admin project update my-project --src-namespace my-updated-source-namespace --dest-namespace my-updated-dest-namespace
|
||||
|
||||
# Delete a project
|
||||
$ argocd admin project delete my-project
|
||||
|
||||
# List all settings
|
||||
$ argocd admin settings list
|
||||
|
||||
# Get the current settings
|
||||
$ argocd admin settings get
|
||||
|
||||
# Update settings
|
||||
$ argocd admin settings update --repository.resync --value 15
|
||||
|
||||
# List all applications
|
||||
$ argocd admin app list
|
||||
|
||||
# Get application details
|
||||
$ argocd admin app get my-app
|
||||
|
||||
# Sync an application
|
||||
$ argocd admin app sync my-app
|
||||
|
||||
# Pause an application
|
||||
$ argocd admin app pause my-app
|
||||
|
||||
# Resume an application
|
||||
$ argocd admin app resume my-app
|
||||
|
||||
# List all repositories
|
||||
$ argocd admin repo list
|
||||
|
||||
# Add a repository
|
||||
$ argocd admin repo add https://github.com/argoproj/my-repo.git
|
||||
|
||||
# Remove a repository
|
||||
$ argocd admin repo remove https://github.com/argoproj/my-repo.git
|
||||
|
||||
# Import an application from a YAML file
|
||||
$ argocd admin app import -f my-app.yaml
|
||||
|
||||
# Export an application to a YAML file
|
||||
$ argocd admin app export my-app -o my-exported-app.yaml
|
||||
|
||||
# Access the Argo CD web UI
|
||||
$ argocd admin dashboard
|
||||
|
||||
# List notifications
|
||||
$ argocd admin notification list
|
||||
|
||||
# Get notification details
|
||||
$ argocd admin notification get my-notification
|
||||
|
||||
# Create a new notification
|
||||
$ argocd admin notification create my-notification -f notification-config.yaml
|
||||
|
||||
# Update a notification
|
||||
$ argocd admin notification update my-notification -f updated-notification-config.yaml
|
||||
|
||||
# Delete a notification
|
||||
$ argocd admin notification delete my-notification
|
||||
|
||||
# Reset the initial admin password
|
||||
$ argocd admin initial-password reset
|
||||
`,
|
||||
}
|
||||
|
||||
command.AddCommand(NewClusterCommand(clientOpts, pathOpts))
|
||||
|
||||
@@ -45,16 +45,6 @@ func NewAppCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "app",
|
||||
Short: "Manage applications configuration",
|
||||
Example: `
|
||||
# Compare results of two reconciliations and print diff
|
||||
argocd admin app diff-reconcile-results APPNAME [flags]
|
||||
|
||||
# Generate declarative config for an application
|
||||
argocd admin app generate-spec APPNAME
|
||||
|
||||
# Reconcile all applications and store reconciliation summary in the specified file
|
||||
argocd admin app get-reconcile-results APPNAME
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
},
|
||||
|
||||
@@ -124,7 +124,7 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
if replicas > 0 {
|
||||
distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
|
||||
distributionFunction(&cluster)
|
||||
cluster.Shard = pointer.Int64(int64(clusterShard))
|
||||
cluster.Shard = pointer.Int64Ptr(int64(clusterShard))
|
||||
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
|
||||
}
|
||||
|
||||
|
||||
@@ -28,19 +28,10 @@ func NewDashboardCommand() *cobra.Command {
|
||||
|
||||
compression, err := cache.CompressionTypeFromString(compressionStr)
|
||||
errors.CheckError(err)
|
||||
errors.CheckError(headless.MaybeStartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression))
|
||||
errors.CheckError(headless.StartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression))
|
||||
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
|
||||
<-ctx.Done()
|
||||
},
|
||||
Example: `# Start the Argo CD Web UI locally on the default port and address
|
||||
$ argocd admin dashboard
|
||||
|
||||
# Start the Argo CD Web UI locally on a custom port and address
|
||||
$ argocd admin dashboard --port 8080 --address 127.0.0.1
|
||||
|
||||
# Start the Argo CD Web UI with GZip compression
|
||||
$ argocd admin dashboard --redis-compress gzip
|
||||
`,
|
||||
}
|
||||
initialize.InitCommand(cmd)
|
||||
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/io"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -48,17 +47,6 @@ func NewGenProjectSpecCommand() *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "generate-spec PROJECT",
|
||||
Short: "Generate declarative config for a project",
|
||||
Example: templates.Examples(`
|
||||
# Generate a YAML configuration for a project named "myproject"
|
||||
argocd admin projects generate-spec myproject
|
||||
|
||||
# Generate a JSON configuration for a project named "anotherproject" and specify an output file
|
||||
argocd admin projects generate-spec anotherproject --output json --file config.json
|
||||
|
||||
# Generate a YAML configuration for a project named "someproject" and write it back to the input file
|
||||
argocd admin projects generate-spec someproject --inline
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c)
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -41,8 +41,6 @@ func NewProjectAllowListGenCommand() *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "generate-allow-list CLUSTERROLE_PATH PROJ_NAME",
|
||||
Short: "Generates project allow list from the specified clusterRole file",
|
||||
Example: `# Generates project allow list from the specified clusterRole file
|
||||
argocd admin proj generate-allow-list /path/to/clusterrole.yaml my-project`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) != 2 {
|
||||
c.HelpFunc()(c, args)
|
||||
|
||||
@@ -189,6 +189,7 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(command)
|
||||
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
|
||||
command.Flags().StringVar(&defaultRole, "default-role", "", "name of the default role to use")
|
||||
@@ -201,55 +202,24 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
|
||||
// NewRBACValidateCommand returns a new rbac validate command
|
||||
func NewRBACValidateCommand() *cobra.Command {
|
||||
var (
|
||||
policyFile string
|
||||
namespace string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
policyFile string
|
||||
)
|
||||
|
||||
var command = &cobra.Command{
|
||||
Use: "validate [--policy-file POLICYFILE] [--namespace NAMESPACE]",
|
||||
Use: "validate --policy-file=POLICYFILE",
|
||||
Short: "Validate RBAC policy",
|
||||
Long: `
|
||||
Validates an RBAC policy for being syntactically correct. The policy must be
|
||||
a local file or a K8s ConfigMap in the provided namespace, and in either CSV or K8s ConfigMap format.
|
||||
`,
|
||||
Example: `
|
||||
# Check whether a given policy file is valid using a local policy.csv file.
|
||||
argocd admin settings rbac validate --policy-file policy.csv
|
||||
|
||||
# Policy file can also be K8s config map with data keys like argocd-rbac-cm,
|
||||
# i.e. 'policy.csv' and (optionally) 'policy.default'
|
||||
argocd admin settings rbac validate --policy-file argocd-rbac-cm.yaml
|
||||
|
||||
# If --policy-file is not given, and instead --namespace is given, the ConfigMap 'argocd-rbac-cm'
|
||||
# from K8s is used.
|
||||
argocd admin settings rbac validate --namespace argocd
|
||||
|
||||
# Either --policy-file or --namespace must be given.
|
||||
a local file, and in either CSV or K8s ConfigMap format.
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) > 0 {
|
||||
if policyFile == "" {
|
||||
c.HelpFunc()(c, args)
|
||||
log.Fatalf("too many arguments")
|
||||
log.Fatalf("Please specify policy to validate using --policy-file")
|
||||
}
|
||||
|
||||
if (namespace == "" && policyFile == "") || (namespace != "" && policyFile != "") {
|
||||
c.HelpFunc()(c, args)
|
||||
log.Fatalf("please provide exactly one of --policy-file or --namespace")
|
||||
}
|
||||
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("could not get config to create k8s client: %v", err)
|
||||
}
|
||||
realClientset, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("could not create k8s client: %v", err)
|
||||
}
|
||||
|
||||
userPolicy, _, _ := getPolicy(ctx, policyFile, realClientset, namespace)
|
||||
userPolicy, _, _ := getPolicy(ctx, policyFile, nil, "")
|
||||
if userPolicy != "" {
|
||||
if err := rbac.ValidatePolicy(userPolicy); err == nil {
|
||||
fmt.Printf("Policy is valid.\n")
|
||||
@@ -258,15 +228,11 @@ argocd admin settings rbac validate --namespace argocd
|
||||
fmt.Printf("Policy is invalid: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
log.Fatalf("Policy is empty or could not be loaded.")
|
||||
}
|
||||
},
|
||||
}
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(command)
|
||||
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
|
||||
command.Flags().StringVar(&namespace, "namespace", "", "namespace to get argo rbac configmap from")
|
||||
|
||||
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
|
||||
return command
|
||||
}
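At its core the command just loads the policy text and calls rbac.ValidatePolicy, as the Run function above shows. A minimal sketch of that check on a literal policy string (the policy line itself is illustrative):

	policy := "p, role:staging-admin, applications, get, staging/*, allow"
	if err := rbac.ValidatePolicy(policy); err != nil {
		fmt.Printf("Policy is invalid: %v\n", err)
	} else {
		fmt.Printf("Policy is valid.\n")
	}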
|
||||
|
||||
@@ -5,42 +5,15 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/assets"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/assets"
|
||||
)
|
||||
|
||||
type FakeClientConfig struct {
|
||||
clientConfig clientcmd.ClientConfig
|
||||
}
|
||||
|
||||
func NewFakeClientConfig(clientConfig clientcmd.ClientConfig) *FakeClientConfig {
|
||||
return &FakeClientConfig{clientConfig: clientConfig}
|
||||
}
|
||||
|
||||
func (f *FakeClientConfig) RawConfig() (clientcmdapi.Config, error) {
|
||||
config, err := f.clientConfig.RawConfig()
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (f *FakeClientConfig) ClientConfig() (*restclient.Config, error) {
|
||||
return f.clientConfig.ClientConfig()
|
||||
}
|
||||
|
||||
func (f *FakeClientConfig) Namespace() (string, bool, error) {
|
||||
return f.clientConfig.Namespace()
|
||||
}
|
||||
|
||||
func (f *FakeClientConfig) ConfigAccess() clientcmd.ConfigAccess {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_isValidRBACAction(t *testing.T) {
|
||||
for k := range validRBACActions {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
@@ -227,19 +200,3 @@ p, role:, certificates, get, .*, allow`
|
||||
require.True(t, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewRBACCanCommand(t *testing.T) {
|
||||
command := NewRBACCanCommand()
|
||||
|
||||
require.NotNil(t, command)
|
||||
assert.Equal(t, "can", command.Name())
|
||||
assert.Equal(t, "Check RBAC permissions for a role or subject", command.Short)
|
||||
}
|
||||
|
||||
func TestNewRBACValidateCommand(t *testing.T) {
|
||||
command := NewRBACValidateCommand()
|
||||
|
||||
require.NotNil(t, command)
|
||||
assert.Equal(t, "validate", command.Name())
|
||||
assert.Equal(t, "Validate RBAC policy", command.Short)
|
||||
}
|
||||
|
||||
@@ -50,7 +50,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/grpc"
|
||||
argoio "github.com/argoproj/argo-cd/v2/util/io"
|
||||
"github.com/argoproj/argo-cd/v2/util/manifeststream"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
"github.com/argoproj/argo-cd/v2/util/text/label"
|
||||
)
|
||||
|
||||
@@ -318,35 +317,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
var command = &cobra.Command{
|
||||
Use: "get APPNAME",
|
||||
Short: "Get application details",
|
||||
Example: templates.Examples(`
|
||||
# Get basic details about the application "my-app" in wide format
|
||||
argocd app get my-app -o wide
|
||||
|
||||
# Get detailed information about the application "my-app" in YAML format
|
||||
argocd app get my-app -o yaml
|
||||
|
||||
# Get details of the application "my-app" in JSON format
|
||||
argocd app get my-app -o json
|
||||
|
||||
# Get application details and include information about the current operation
|
||||
argocd app get my-app --show-operation
|
||||
|
||||
# Show application parameters and overrides
|
||||
argocd app get my-app --show-params
|
||||
|
||||
# Refresh application data when retrieving
|
||||
argocd app get my-app --refresh
|
||||
|
||||
# Perform a hard refresh, including refreshing application data and target manifests cache
|
||||
argocd app get my-app --hard-refresh
|
||||
|
||||
# Get application details and display them in a tree format
|
||||
argocd app get my-app --output tree
|
||||
|
||||
# Get application details and display them in a detailed tree format
|
||||
argocd app get my-app --output tree=detailed
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
if len(args) == 0 {
|
||||
@@ -431,44 +401,6 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
var command = &cobra.Command{
|
||||
Use: "logs APPNAME",
|
||||
Short: "Get logs of application pods",
|
||||
Example: templates.Examples(`
|
||||
# Get logs of pods associated with the application "my-app"
|
||||
argocd app logs my-app
|
||||
|
||||
# Get logs of pods associated with the application "my-app" in a specific resource group
|
||||
argocd app logs my-app --group my-group
|
||||
|
||||
# Get logs of pods associated with the application "my-app" in a specific resource kind
|
||||
argocd app logs my-app --kind my-kind
|
||||
|
||||
# Get logs of pods associated with the application "my-app" in a specific namespace
|
||||
argocd app logs my-app --namespace my-namespace
|
||||
|
||||
# Get logs of pods associated with the application "my-app" for a specific resource name
|
||||
argocd app logs my-app --name my-resource
|
||||
|
||||
# Stream logs in real-time for the application "my-app"
|
||||
argocd app logs my-app -f
|
||||
|
||||
# Get the last N lines of logs for the application "my-app"
|
||||
argocd app logs my-app --tail 100
|
||||
|
||||
# Get logs since a specified number of seconds ago
|
||||
argocd app logs my-app --since-seconds 3600
|
||||
|
||||
# Get logs until a specified time (format: "2023-10-10T15:30:00Z")
|
||||
argocd app logs my-app --until-time "2023-10-10T15:30:00Z"
|
||||
|
||||
# Filter logs to show only those containing a specific string
|
||||
argocd app logs my-app --filter "error"
|
||||
|
||||
# Get logs for a specific container within the pods
|
||||
argocd app logs my-app -c my-container
|
||||
|
||||
# Get previously terminated container logs
|
||||
argocd app logs my-app -p
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -524,8 +456,8 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
} else {
|
||||
return
|
||||
}
|
||||
} // Done with receive message
|
||||
} // Done with retry
|
||||
} //Done with receive message
|
||||
} //Done with retry
|
||||
},
|
||||
}
|
||||
|
||||
@@ -716,23 +648,6 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
var command = &cobra.Command{
|
||||
Use: "set APPNAME",
|
||||
Short: "Set application parameters",
|
||||
Example: templates.Examples(`
|
||||
# Set application parameters for the application "my-app"
|
||||
argocd app set my-app --parameter key1=value1 --parameter key2=value2
|
||||
|
||||
# Set and validate application parameters for "my-app"
|
||||
argocd app set my-app --parameter key1=value1 --parameter key2=value2 --validate
|
||||
|
||||
# Set and override application parameters with JSON or YAML file
|
||||
argocd app set my-app --from-file path/to/parameters.json
|
||||
|
||||
# Set and override application parameters with a parameter file
|
||||
argocd app set my-app --parameter-file path/to/parameter-file.yaml
|
||||
|
||||
# Set application parameters and specify the namespace
|
||||
argocd app set my-app --parameter key1=value1 --parameter key2=value2 --namespace my-namespace
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -889,7 +804,7 @@ func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, n
|
||||
for i, item := range source.Kustomize.Images {
|
||||
if argoappv1.KustomizeImage(kustomizeImage).Match(item) {
|
||||
updated = true
|
||||
// remove i
|
||||
//remove i
|
||||
a := source.Kustomize.Images
|
||||
copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index.
|
||||
a[len(a)-1] = "" // Erase last element (write zero value).
@@ -1904,7 +1819,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
Backoff: &argoappv1.Backoff{
|
||||
Duration: retryBackoffDuration.String(),
|
||||
MaxDuration: retryBackoffMaxDuration.String(),
|
||||
Factor: pointer.Int64(retryBackoffFactor),
|
||||
Factor: pointer.Int64Ptr(retryBackoffFactor),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -2143,7 +2058,7 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string
|
||||
} else if watch.degraded && watch.health {
|
||||
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
|
||||
healthStatus == string(health.HealthStatusDegraded)
|
||||
// below are good
|
||||
//below are good
|
||||
} else if watch.suspended && watch.health {
|
||||
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
|
||||
healthStatus == string(health.HealthStatusSuspended)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
"os"
|
||||
"strconv"
|
||||
"text/tabwriter"
|
||||
@@ -34,22 +33,11 @@ type DisplayedAction struct {
|
||||
Disabled bool
|
||||
}
|
||||
|
||||
var (
|
||||
appActionExample = templates.Examples(`
|
||||
# List all the available actions for an application
|
||||
argocd app actions list APPNAME
|
||||
|
||||
# Run an available action for an application
|
||||
argocd app actions run APPNAME ACTION --kind KIND [--resource-name RESOURCE] [--namespace NAMESPACE] [--group GROUP]
|
||||
`)
|
||||
)
|
||||
|
||||
// NewApplicationResourceActionsCommand returns a new instance of an `argocd app actions` command
|
||||
func NewApplicationResourceActionsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "actions",
|
||||
Short: "Manage Resource actions",
|
||||
Example: appActionExample,
|
||||
Use: "actions",
|
||||
Short: "Manage Resource actions",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -70,10 +58,6 @@ func NewApplicationResourceActionsListCommand(clientOpts *argocdclient.ClientOpt
|
||||
var command = &cobra.Command{
|
||||
Use: "list APPNAME",
|
||||
Short: "Lists available actions on a resource",
|
||||
Example: templates.Examples(`
|
||||
# List all the available actions for an application
|
||||
argocd app actions list APPNAME
|
||||
`),
|
||||
}
|
||||
command.Run = func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
@@ -152,10 +136,6 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti
|
||||
var command = &cobra.Command{
|
||||
Use: "run APPNAME ACTION",
|
||||
Short: "Runs an available action on resource(s)",
|
||||
Example: templates.Examples(`
|
||||
# Run an available action for an application
|
||||
argocd app actions run APPNAME ACTION --kind KIND [--resource-name RESOURCE] [--namespace NAMESPACE] [--group GROUP]
|
||||
`),
|
||||
}
|
||||
|
||||
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource")
|
||||
|
||||
@@ -3,7 +3,6 @@ package commands
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
@@ -19,6 +18,8 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
argoio "github.com/argoproj/argo-cd/v2/util/io"
|
||||
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
@@ -29,7 +30,6 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
var kind string
|
||||
var group string
|
||||
var all bool
|
||||
var project string
|
||||
command := &cobra.Command{
|
||||
Use: "patch-resource APPNAME",
|
||||
Short: "Patch resource in an application",
|
||||
@@ -46,7 +46,6 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
command.Flags().StringVar(&group, "group", "", "Group")
|
||||
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
|
||||
command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching resources")
|
||||
command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
|
||||
command.Run = func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -78,7 +77,6 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
Kind: pointer.String(gvk.Kind),
|
||||
Patch: pointer.String(patch),
|
||||
PatchType: pointer.String(patchType),
|
||||
Project: pointer.String(project),
|
||||
})
|
||||
errors.CheckError(err)
|
||||
log.Infof("Resource '%s' patched", obj.GetName())
|
||||
@@ -96,7 +94,6 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
var force bool
|
||||
var orphan bool
|
||||
var all bool
|
||||
var project string
|
||||
command := &cobra.Command{
|
||||
Use: "delete-resource APPNAME",
|
||||
Short: "Delete resource in an application",
|
||||
@@ -111,7 +108,6 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
command.Flags().BoolVar(&force, "force", false, "Indicates whether to force delete the resource")
|
||||
command.Flags().BoolVar(&orphan, "orphan", false, "Indicates whether to orphan the dependents of the deleted resource")
|
||||
command.Flags().BoolVar(&all, "all", false, "Indicates whether to delete multiple matching resources")
|
||||
command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
|
||||
command.Run = func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -143,7 +139,6 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
Kind: pointer.String(gvk.Kind),
|
||||
Force: &force,
|
||||
Orphan: &orphan,
|
||||
Project: pointer.String(project),
|
||||
})
|
||||
errors.CheckError(err)
|
||||
log.Infof("Resource '%s' deleted", obj.GetName())
|
||||
@@ -255,7 +250,6 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
|
||||
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var orphaned bool
|
||||
var output string
|
||||
var project string
|
||||
var command = &cobra.Command{
|
||||
Use: "resources APPNAME",
|
||||
Short: "List resource of application",
|
||||
@@ -272,7 +266,6 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
|
||||
appResourceTree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{
|
||||
ApplicationName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Project: &project,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
printResources(listAll, orphaned, appResourceTree, output)
|
||||
@@ -280,6 +273,5 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
|
||||
}
|
||||
command.Flags().BoolVar(&orphaned, "orphaned", false, "Lists only orphaned resources")
|
||||
command.Flags().StringVar(&output, "output", "", "Provides the tree view of the resources")
|
||||
command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -67,10 +67,6 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
var command = &cobra.Command{
|
||||
Use: "get APPSETNAME",
|
||||
Short: "Get ApplicationSet details",
|
||||
Example: templates.Examples(`
|
||||
# Get ApplicationSets
|
||||
argocd appset get APPSETNAME
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -15,9 +15,7 @@ func NewBcryptCmd() *cobra.Command {
|
||||
)
|
||||
var bcryptCmd = &cobra.Command{
|
||||
Use: "bcrypt",
|
||||
Short: "Generate bcrypt hash for any password",
|
||||
Example: `# Generate bcrypt hash for any password
|
||||
argocd account bcrypt --password YOUR_PASSWORD`,
|
||||
Short: "Generate bcrypt hash for the admin password",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
bytePassword := []byte(password)
|
||||
// Hashing the password
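The hashing step that follows is cut off by the hunk; presumably it relies on golang.org/x/crypto/bcrypt along these lines (a sketch, not the command's actual code):

	hash, err := bcrypt.GenerateFromPassword(bytePassword, bcrypt.DefaultCost)
	if err != nil {
		log.Fatalf("failed to generate bcrypt hash: %v", err)
	}
	fmt.Println(string(hash))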
|
||||
@@ -485,23 +485,6 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
}
|
||||
},
|
||||
Example: `
|
||||
# List Clusters in Default "Wide" Format
|
||||
argocd cluster list
|
||||
|
||||
# List Clusters by specifying the server
|
||||
argocd cluster list --server <ARGOCD_SERVER_ADDRESS>
|
||||
|
||||
# List Clusters in JSON Format
|
||||
argocd cluster list -o json --server <ARGOCD_SERVER_ADDRESS>
|
||||
|
||||
# List Clusters in YAML Format
|
||||
argocd cluster list -o yaml --server <ARGOCD_SERVER_ADDRESS>
|
||||
|
||||
# List Clusters that have been added to your Argo CD
|
||||
argocd cluster list -o server <ARGOCD_SERVER_ADDRESS>
|
||||
|
||||
`,
|
||||
}
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|server")
|
||||
return command
|
||||
|
||||
@@ -211,13 +211,6 @@ compdef _argocd argocd
|
||||
Optionally, also add the following, in case you are getting errors involving compdef & compinit such as command not found: compdef:
|
||||
autoload -Uz compinit
|
||||
compinit
|
||||
`,
|
||||
Example: `# For bash
|
||||
$ source <(argocd completion bash)
|
||||
|
||||
# For zsh
|
||||
$ argocd completion zsh > _argocd
|
||||
$ source _argocd
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) != 1 {
|
||||
|
||||
@@ -22,14 +22,6 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
Use: "context [CONTEXT]",
|
||||
Aliases: []string{"ctx"},
|
||||
Short: "Switch between contexts",
|
||||
Example: `# List Argo CD Contexts
|
||||
argocd context
|
||||
|
||||
# Switch Argo CD context
|
||||
argocd context cd.argoproj.io
|
||||
|
||||
# Delete Argo CD context
|
||||
argocd context cd.argoproj.io --delete`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
|
||||
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
argoio "github.com/argoproj/argo-cd/v2/util/io"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
)
|
||||
|
||||
// NewGPGCommand returns a new instance of an `argocd repo` command
|
||||
@@ -43,17 +42,6 @@ func NewGPGListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List configured GPG public keys",
|
||||
Example: templates.Examples(`
|
||||
# List all configured GPG public keys in wide format (default).
|
||||
argocd gpg list
|
||||
|
||||
# List all configured GPG public keys in JSON format.
|
||||
argocd gpg list -o json
|
||||
|
||||
# List all configured GPG public keys in YAML format.
|
||||
argocd gpg list -o yaml
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -84,17 +72,6 @@ func NewGPGGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "get KEYID",
|
||||
Short: "Get the GPG public key with ID <KEYID> from the server",
|
||||
Example: templates.Examples(`
|
||||
# Get a GPG public key with the specified KEYID in wide format (default).
|
||||
argocd gpg get KEYID
|
||||
|
||||
# Get a GPG public key with the specified KEYID in JSON format.
|
||||
argocd gpg get KEYID -o json
|
||||
|
||||
# Get a GPG public key with the specified KEYID in YAML format.
|
||||
argocd gpg get KEYID -o yaml
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -132,11 +109,6 @@ func NewGPGAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "add",
|
||||
Short: "Adds a GPG public key to the server's keyring",
|
||||
Example: templates.Examples(`
|
||||
# Add a GPG public key to the server's keyring from a file.
|
||||
argocd gpg add --from /path/to/keyfile
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -148,17 +148,13 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
	return nil
}

// MaybeStartLocalServer allows executing command in a headless mode. If we're in core mode, starts the Argo CD API
// server on the fly and changes provided client options to use started API server port.
//
// If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
// not start the local server.
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error {
// StartLocalServer allows executing command in a headless mode: on the fly starts Argo CD API server and
// changes provided client options to use started API server port
func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error {
	flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
	clientConfig := cli.AddKubectlFlagsToSet(flags)
	startInProcessAPI := clientOpts.Core
	if !startInProcessAPI {
		// Core mode is enabled on client options. Check the local config to see if we should start the API server.
		localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
		if err != nil {
			return fmt.Errorf("error reading local config: %w", err)
@@ -168,11 +164,9 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
		if err != nil {
			return fmt.Errorf("error resolving context: %w", err)
		}
		// There was a local config file, so determine whether core mode is enabled per the config file.
		startInProcessAPI = configCtx.Server.Core
	}
	}
	// If we're in core mode, start the API server on the fly.
	if !startInProcessAPI {
		return nil
	}
@@ -263,9 +257,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
	ctx := c.Context()

	ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
	// If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
	// If we're not in core mode, this function call will do nothing.
	err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone)
	err := StartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone)
	if err != nil {
		log.Fatal(err)
	}
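The hunk above swaps the headless helper's name, but the gating logic is the same in both versions: an in-process API server is started only when core mode is requested, either via the client options or via the current context in the local config. A minimal sketch of that decision flow, using hypothetical stand-in types rather than the real Argo CD ones:

```go
// Minimal sketch of the core-mode gating shown above. The types and the
// config shape are assumptions for illustration, not the real Argo CD API.
package main

import "fmt"

type clientOptions struct {
	Core bool // set by the --core flag
}

type contextConfig struct {
	ServerCore bool // "core" flag stored for the current context
}

// shouldStartInProcessAPI mirrors the decision flow in the hunk: the client
// option wins, otherwise the current context's local config decides.
func shouldStartInProcessAPI(opts clientOptions, cfg *contextConfig) bool {
	if opts.Core {
		return true
	}
	if cfg != nil {
		return cfg.ServerCore
	}
	return false
}

func main() {
	fmt.Println(shouldStartInProcessAPI(clientOptions{Core: false}, &contextConfig{ServerCore: true})) // true
}
```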
@@ -18,10 +18,6 @@ func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comma
		Use:   "logout CONTEXT",
		Short: "Log out from Argo CD",
		Long:  "Log out from Argo CD",
		Example: `# To log out of argocd
$ argocd logout
# This can be helpful for security reasons or when you want to switch between different Argo CD contexts or accounts.
`,
		Run: func(c *cobra.Command, args []string) {
			if len(args) == 0 {
				c.HelpFunc()(c, args)
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/git"
|
||||
"github.com/argoproj/argo-cd/v2/util/gpg"
|
||||
argoio "github.com/argoproj/argo-cd/v2/util/io"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
)
|
||||
|
||||
type policyOpts struct {
|
||||
@@ -40,19 +39,6 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "proj",
|
||||
Short: "Manage projects",
|
||||
Example: templates.Examples(`
|
||||
# List all available projects
|
||||
argocd proj list
|
||||
|
||||
# Create a new project with name PROJECT
|
||||
argocd proj create PROJECT
|
||||
|
||||
# Delete the project with name PROJECT
|
||||
argocd proj delete PROJECT
|
||||
|
||||
# Edit the information on project with name PROJECT
|
||||
argocd proj edit PROJECT
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -102,13 +88,6 @@ func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
var command = &cobra.Command{
|
||||
Use: "create PROJECT",
|
||||
Short: "Create a project",
|
||||
Example: templates.Examples(`
|
||||
# Create a new project with name PROJECT
|
||||
argocd proj create PROJECT
|
||||
|
||||
# Create a new project with name PROJECT from a file or URL to a Kubernetes manifest
|
||||
argocd proj create PROJECT -f FILE|URL
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -139,13 +118,6 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
	var command = &cobra.Command{
		Use:   "set PROJECT",
		Short: "Set project parameters",
		Example: templates.Examples(`
			# Set project parameters with some allowed cluster resources [RES1,RES2,...] for project with name PROJECT
			argocd proj set PROJECT --allow-cluster-resource [RES1,RES2,...]

			# Set project parameters with some denied namespaced resources [RES1,RES2,...] for project with name PROJECT
			argocd proj set PROJECT --deny-namespaced-resource [RES1,RES2,...]
		`),
		Run: func(c *cobra.Command, args []string) {
			ctx := c.Context()
@@ -179,10 +151,6 @@ func NewProjectAddSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *c
|
||||
var command = &cobra.Command{
|
||||
Use: "add-signature-key PROJECT KEY-ID",
|
||||
Short: "Add GnuPG signature key to project",
|
||||
Example: templates.Examples(`
|
||||
# Add GnuPG signature key KEY-ID to project PROJECT
|
||||
argocd proj add-signature-key PROJECT KEY-ID
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -221,10 +189,6 @@ func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions)
|
||||
var command = &cobra.Command{
|
||||
Use: "remove-signature-key PROJECT KEY-ID",
|
||||
Short: "Remove GnuPG signature key from project",
|
||||
Example: templates.Examples(`
|
||||
# Remove GnuPG signature key KEY-ID from project PROJECT
|
||||
argocd proj remove-signature-key PROJECT KEY-ID
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -275,13 +239,6 @@ func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *co
|
||||
var command = &cobra.Command{
|
||||
Use: "add-destination PROJECT SERVER/NAME NAMESPACE",
|
||||
Short: "Add project destination",
|
||||
Example: templates.Examples(`
|
||||
# Add project destination using a server URL (SERVER) in the specified namespace (NAMESPACE) on the project with name PROJECT
|
||||
argocd proj add-destination PROJECT SERVER NAMESPACE
|
||||
|
||||
# Add project destination using a server name (NAME) in the specified namespace (NAMESPACE) on the project with name PROJECT
|
||||
argocd proj add-destination PROJECT NAME NAMESPACE --name
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -319,10 +276,6 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
|
||||
var command = &cobra.Command{
|
||||
Use: "remove-destination PROJECT SERVER NAMESPACE",
|
||||
Short: "Remove project destination",
|
||||
Example: templates.Examples(`
|
||||
# Remove the destination (SERVER) from the specified namespace (NAMESPACE) on the project with name PROJECT
|
||||
argocd proj remove-destination PROJECT SERVER NAMESPACE
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -367,13 +320,6 @@ func NewProjectAddOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOptions)
|
||||
var command = &cobra.Command{
|
||||
Use: "add-orphaned-ignore PROJECT GROUP KIND",
|
||||
Short: "Add a resource to orphaned ignore list",
|
||||
Example: templates.Examples(`
|
||||
# Add a resource of the specified GROUP and KIND to orphaned ignore list on the project with name PROJECT
|
||||
argocd proj add-orphaned-ignore PROJECT GROUP KIND
|
||||
|
||||
# Add resources of the specified GROUP and KIND using a NAME pattern to orphaned ignore list on the project with name PROJECT
|
||||
argocd proj add-orphaned-ignore PROJECT GROUP KIND --name NAME
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -417,15 +363,8 @@ func NewProjectRemoveOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOption
|
||||
name string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "remove-orphaned-ignore PROJECT GROUP KIND",
|
||||
Use: "remove-orphaned-ignore PROJECT GROUP KIND NAME",
|
||||
Short: "Remove a resource from orphaned ignore list",
|
||||
Example: templates.Examples(`
|
||||
# Remove a resource of the specified GROUP and KIND from orphaned ignore list on the project with name PROJECT
|
||||
argocd proj remove-orphaned-ignore PROJECT GROUP KIND
|
||||
|
||||
# Remove resources of the specified GROUP and KIND using a NAME pattern from orphaned ignore list on the project with name PROJECT
|
||||
argocd proj remove-orphaned-ignore PROJECT GROUP KIND --name NAME
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -472,10 +411,6 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
var command = &cobra.Command{
|
||||
Use: "add-source PROJECT URL",
|
||||
Short: "Add project source repository",
|
||||
Example: templates.Examples(`
|
||||
# Add a source repository (URL) to the project with name PROJECT
|
||||
argocd proj add-source PROJECT URL
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -538,7 +473,7 @@ func modifyResourcesList(list *[]metav1.GroupKind, add bool, listDesc string, gr
	}
}

func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command {
func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command {
	var (
		listType    string
		defaultList string
@@ -549,9 +484,8 @@ func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdc
		defaultList = "allow"
	}
	var command = &cobra.Command{
		Use:     cmdUse,
		Short:   cmdDesc,
		Example: templates.Examples(examples),
		Use:   cmdUse,
		Short: cmdDesc,
		Run: func(c *cobra.Command, args []string) {
			ctx := c.Context()
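For context on the signature change above: modifyResourceListCmd is a shared builder, and the allow/deny namespace and cluster resource subcommands are thin wrappers around it. A hedged sketch of that wrapper pattern follows; the builder body is a stand-in, only the shape of the calls mirrors the diff.

```go
// Sketch of the one-builder, many-wrappers pattern used for the
// allow/deny resource subcommands. Behavior inside Run is illustrative only.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func modifyResourceListCmd(cmdUse, cmdDesc string, allow, namespaced bool) *cobra.Command {
	return &cobra.Command{
		Use:   cmdUse,
		Short: cmdDesc,
		Run: func(c *cobra.Command, args []string) {
			// The real command patches the project's allow/deny lists here.
			fmt.Printf("allow=%v namespaced=%v args=%v\n", allow, namespaced, args)
		},
	}
}

func newAllowNamespaceResourceCommand() *cobra.Command {
	return modifyResourceListCmd(
		"allow-namespace-resource PROJECT GROUP KIND",
		"Move a namespaced API resource from the deny list to the allow list",
		true, true)
}

func main() {
	cmd := newAllowNamespaceResourceCommand()
	cmd.SetArgs([]string{"PROJECT", "apps", "Deployment"})
	_ = cmd.Execute()
}
```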
@@ -600,44 +534,28 @@ func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdc
|
||||
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "allow-namespace-resource PROJECT GROUP KIND"
|
||||
desc := "Removes a namespaced API resource from the deny list or add a namespaced API resource to the allow list"
|
||||
examples := `
|
||||
# Removes a namespaced API resource with specified GROUP and KIND from the deny list or add a namespaced API resource to the allow list for project PROJECT
|
||||
argocd proj allow-namespace-resource PROJECT GROUP KIND
|
||||
`
|
||||
return modifyResourceListCmd(use, desc, examples, clientOpts, true, true)
|
||||
return modifyResourceListCmd(use, desc, clientOpts, true, true)
|
||||
}
|
||||
|
||||
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
|
||||
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "deny-namespace-resource PROJECT GROUP KIND"
|
||||
desc := "Adds a namespaced API resource to the deny list or removes a namespaced API resource from the allow list"
|
||||
examples := `
|
||||
# Adds a namespaced API resource with specified GROUP and KIND from the deny list or removes a namespaced API resource from the allow list for project PROJECT
|
||||
argocd proj deny-namespace-resource PROJECT GROUP KIND
|
||||
`
|
||||
return modifyResourceListCmd(use, desc, examples, clientOpts, false, true)
|
||||
return modifyResourceListCmd(use, desc, clientOpts, false, true)
|
||||
}
|
||||
|
||||
// NewProjectDenyClusterResourceCommand returns a new instance of an `deny-cluster-resource` command
|
||||
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "deny-cluster-resource PROJECT GROUP KIND"
|
||||
desc := "Removes a cluster-scoped API resource from the allow list and adds it to deny list"
|
||||
examples := `
|
||||
# Removes a cluster-scoped API resource with specified GROUP and KIND from the allow list and adds it to deny list for project PROJECT
|
||||
argocd proj deny-cluster-resource PROJECT GROUP KIND
|
||||
`
|
||||
return modifyResourceListCmd(use, desc, examples, clientOpts, false, false)
|
||||
return modifyResourceListCmd(use, desc, clientOpts, false, false)
|
||||
}
|
||||
|
||||
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
|
||||
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
use := "allow-cluster-resource PROJECT GROUP KIND"
|
||||
desc := "Adds a cluster-scoped API resource to the allow list and removes it from deny list"
|
||||
examples := `
|
||||
# Adds a cluster-scoped API resource with specified GROUP and KIND to the allow list and removes it from deny list for project PROJECT
|
||||
argocd proj allow-cluster-resource PROJECT GROUP KIND
|
||||
`
|
||||
return modifyResourceListCmd(use, desc, examples, clientOpts, true, false)
|
||||
return modifyResourceListCmd(use, desc, clientOpts, true, false)
|
||||
}
|
||||
|
||||
// NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-src` command
|
||||
@@ -645,10 +563,6 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
var command = &cobra.Command{
|
||||
Use: "remove-source PROJECT URL",
|
||||
Short: "Remove project source repository",
|
||||
Example: templates.Examples(`
|
||||
# Remove URL source repository to project PROJECT
|
||||
argocd proj remove-source PROJECT URL
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -689,10 +603,6 @@ func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
var command = &cobra.Command{
|
||||
Use: "delete PROJECT",
|
||||
Short: "Delete project",
|
||||
Example: templates.Examples(`
|
||||
# Delete the project with name PROJECT
|
||||
argocd proj delete PROJECT
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -736,13 +646,6 @@ func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
var command = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List projects",
|
||||
Example: templates.Examples(`
|
||||
# List all available projects
|
||||
argocd proj list
|
||||
|
||||
# List all available projects in yaml format
|
||||
argocd proj list -o yaml
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -908,14 +811,6 @@ func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
var command = &cobra.Command{
|
||||
Use: "get PROJECT",
|
||||
Short: "Get project details",
|
||||
Example: templates.Examples(`
|
||||
# Get details from project PROJECT
|
||||
argocd proj get PROJECT
|
||||
|
||||
# Get details from project PROJECT in yaml format
|
||||
argocd proj get PROJECT -o yaml
|
||||
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -953,10 +848,6 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
var command = &cobra.Command{
|
||||
Use: "edit PROJECT",
|
||||
Short: "Edit project",
|
||||
Example: templates.Examples(`
|
||||
# Edit the information on project with name PROJECT
|
||||
argocd proj edit PROJECT
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/io"
|
||||
"github.com/argoproj/argo-cd/v2/util/jwt"
|
||||
"github.com/argoproj/argo-cd/v2/util/templates"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -57,30 +56,6 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
var command = &cobra.Command{
|
||||
Use: "add-policy PROJECT ROLE-NAME",
|
||||
Short: "Add a policy to a project role",
|
||||
Example: `# Before adding new policy
|
||||
$ argocd proj role get test-project test-role
|
||||
Role Name: test-role
|
||||
Description:
|
||||
Policies:
|
||||
p, proj:test-project:test-role, projects, get, test-project, allow
|
||||
JWT Tokens:
|
||||
ID ISSUED-AT EXPIRES-AT
|
||||
1696759698 2023-10-08T11:08:18+01:00 (3 hours ago) <none>
|
||||
|
||||
# Add a new policy to allow update to the project
|
||||
$ argocd proj role add-policy test-project test-role -a update -p allow -o project
|
||||
|
||||
# Policy should be updated
|
||||
$ argocd proj role get test-project test-role
|
||||
Role Name: test-role
|
||||
Description:
|
||||
Policies:
|
||||
p, proj:test-project:test-role, projects, get, test-project, allow
|
||||
p, proj:test-project:test-role, applications, update, test-project/project, allow
|
||||
JWT Tokens:
|
||||
ID ISSUED-AT EXPIRES-AT
|
||||
1696759698 2023-10-08T11:08:18+01:00 (3 hours ago) <none>
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -118,30 +93,6 @@ func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *
|
||||
var command = &cobra.Command{
|
||||
Use: "remove-policy PROJECT ROLE-NAME",
|
||||
Short: "Remove a policy from a role within a project",
|
||||
Example: `List the policy of the test-role before removing a policy
|
||||
$ argocd proj role get test-project test-role
|
||||
Role Name: test-role
|
||||
Description:
|
||||
Policies:
|
||||
p, proj:test-project:test-role, projects, get, test-project, allow
|
||||
p, proj:test-project:test-role, applications, update, test-project/project, allow
|
||||
JWT Tokens:
|
||||
ID ISSUED-AT EXPIRES-AT
|
||||
1696759698 2023-10-08T11:08:18+01:00 (3 hours ago) <none>
|
||||
|
||||
# Remove the policy to allow update to objects
|
||||
$ argocd proj role remove-policy test-project test-role -a update -p allow -o project
|
||||
|
||||
# The role should be removed now.
|
||||
$ argocd proj role get test-project test-role
|
||||
Role Name: test-role
|
||||
Description:
|
||||
Policies:
|
||||
p, proj:test-project:test-role, projects, get, test-project, allow
|
||||
JWT Tokens:
|
||||
ID ISSUED-AT EXPIRES-AT
|
||||
1696759698 2023-10-08T11:08:18+01:00 (4 hours ago) <none>
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -189,11 +140,6 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
var command = &cobra.Command{
|
||||
Use: "create PROJECT ROLE-NAME",
|
||||
Short: "Create a project role",
|
||||
Example: templates.Examples(`
|
||||
# Create a project role in the "my-project" project with the name "my-role".
|
||||
argocd proj role create my-project my-role --description "My project role description"
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -228,9 +174,8 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
// NewProjectRoleDeleteCommand returns a new instance of an `argocd proj role delete` command
|
||||
func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "delete PROJECT ROLE-NAME",
|
||||
Short: "Delete a project role",
|
||||
Example: `$ argocd proj role delete test-project test-role`,
|
||||
Use: "delete PROJECT ROLE-NAME",
|
||||
Short: "Delete a project role",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -278,15 +223,8 @@ func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *c
|
||||
tokenID string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "create-token PROJECT ROLE-NAME",
|
||||
Short: "Create a project token",
|
||||
Example: `$ argocd proj role create-token test-project test-role
|
||||
Create token succeeded for proj:test-project:test-role.
|
||||
ID: f316c466-40bd-4cfd-8a8c-1392e92255d4
|
||||
Issued At: 2023-10-08T15:21:40+01:00
|
||||
Expires At: Never
|
||||
Token: xxx
|
||||
`,
|
||||
Use: "create-token PROJECT ROLE-NAME",
|
||||
Short: "Create a project token",
|
||||
Aliases: []string{"token-create"},
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
@@ -350,13 +288,8 @@ func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *co
|
||||
useUnixTime bool
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "list-tokens PROJECT ROLE-NAME",
|
||||
Short: "List tokens for a given role.",
|
||||
Example: `$ argocd proj role list-tokens test-project test-role
|
||||
ID ISSUED AT EXPIRES AT
|
||||
f316c466-40bd-4cfd-8a8c-1392e92255d4 2023-10-08T15:21:40+01:00 Never
|
||||
fa9d3517-c52d-434c-9bff-215b38508842 2023-10-08T11:08:18+01:00 Never
|
||||
`,
|
||||
Use: "list-tokens PROJECT ROLE-NAME",
|
||||
Short: "List tokens for a given role.",
|
||||
Aliases: []string{"list-token", "token-list"},
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
@@ -406,35 +339,8 @@ fa9d3517-c52d-434c-9bff-215b38508842 2023-10-08T11:08:18+01:00 Never
|
||||
// NewProjectRoleDeleteTokenCommand returns a new instance of an `argocd proj role delete-token` command
|
||||
func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
|
||||
Short: "Delete a project token",
|
||||
Example: `#Create project test-project
|
||||
$ argocd proj create test-project
|
||||
|
||||
# Create a role associated with test-project
|
||||
$ argocd proj role create test-project test-role
|
||||
Role 'test-role' created
|
||||
|
||||
# Create test-role associated with test-project
|
||||
$ argocd proj role create-token test-project test-role
|
||||
Create token succeeded for proj:test-project:test-role.
|
||||
ID: c312450e-12e1-4e0d-9f65-fac9cb027b32
|
||||
Issued At: 2023-10-08T13:58:57+01:00
|
||||
Expires At: Never
|
||||
Token: xxx
|
||||
|
||||
# Get test-role id to input into the delete-token command below
|
||||
$ argocd proj role get test-project test-role
|
||||
Role Name: test-role
|
||||
Description:
|
||||
Policies:
|
||||
p, proj:test-project:test-role, projects, get, test-project, allow
|
||||
JWT Tokens:
|
||||
ID ISSUED-AT EXPIRES-AT
|
||||
1696769937 2023-10-08T13:58:57+01:00 (6 minutes ago) <none>
|
||||
|
||||
$ argocd proj role delete-token test-project test-role 1696769937
|
||||
`,
|
||||
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
|
||||
Short: "Delete a project token",
|
||||
Aliases: []string{"token-delete", "remove-token"},
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
@@ -483,15 +389,6 @@ func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
var command = &cobra.Command{
|
||||
Use: "list PROJECT",
|
||||
Short: "List all the roles in a project",
|
||||
Example: templates.Examples(`
|
||||
# This command will list all the roles in argocd-project in a default table format.
|
||||
argocd proj role list PROJECT
|
||||
|
||||
# List the roles in the project in formats like json, yaml, wide, or name.
|
||||
argocd proj role list PROJECT --output json
|
||||
|
||||
`),
|
||||
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -527,16 +424,6 @@ func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
var command = &cobra.Command{
|
||||
Use: "get PROJECT ROLE-NAME",
|
||||
Short: "Get the details of a specific role",
|
||||
Example: `$ argocd proj role get test-project test-role
|
||||
Role Name: test-role
|
||||
Description:
|
||||
Policies:
|
||||
p, proj:test-project:test-role, projects, get, test-project, allow
|
||||
JWT Tokens:
|
||||
ID ISSUED-AT EXPIRES-AT
|
||||
1696774900 2023-10-08T15:21:40+01:00 (4 minutes ago) <none>
|
||||
1696759698 2023-10-08T11:08:18+01:00 (4 hours ago) <none>
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -125,23 +125,6 @@ func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) *
|
||||
var command = &cobra.Command{
|
||||
Use: "add PROJECT",
|
||||
Short: "Add a sync window to a project",
|
||||
Example: `# Add a 1 hour allow sync window
|
||||
argocd proj windows add PROJECT \
|
||||
--kind allow \
|
||||
--schedule "0 22 * * *" \
|
||||
--duration 1h \
|
||||
--applications "*"
|
||||
|
||||
# Add a deny sync window with the ability to manually sync.
|
||||
argocd proj windows add PROJECT \
|
||||
--kind deny \
|
||||
--schedule "30 10 * * *" \
|
||||
--duration 30m \
|
||||
--applications "prod-\\*,website" \
|
||||
--namespaces "default,\\*-prod" \
|
||||
--clusters "prod,staging" \
|
||||
--manual-sync
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -175,7 +158,7 @@ argocd proj windows add PROJECT \
|
||||
return command
|
||||
}
|
||||
|
||||
// NewProjectWindowsDeleteCommand returns a new instance of an `argocd proj windows delete` command
|
||||
// NewProjectWindowsAddWindowCommand returns a new instance of an `argocd proj windows delete` command
|
||||
func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "delete PROJECT ID",
|
||||
@@ -222,10 +205,6 @@ func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
Use: "update PROJECT ID",
|
||||
Short: "Update a project sync window",
|
||||
Long: "Update a project sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
|
||||
Example: `# Change a sync window's schedule
|
||||
argocd proj windows update PROJECT ID \
|
||||
--schedule "0 20 * * *"
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -274,12 +253,6 @@ func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra
|
||||
var command = &cobra.Command{
|
||||
Use: "list PROJECT",
|
||||
Short: "List project sync windows",
|
||||
Example: `# List project windows
|
||||
argocd proj windows list PROJECT
|
||||
|
||||
# List project windows in yaml format
|
||||
argocd proj windows list PROJECT -o yaml
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -312,8 +285,8 @@ argocd proj windows list PROJECT -o yaml
func printSyncWindows(proj *v1alpha1.AppProject) {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	var fmtStr string
	headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC", "TIMEZONE"}
	fmtStr = strings.Repeat("%s\t", len(headers)) + "\n"
	headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC"}
	fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
	fmt.Fprintf(w, fmtStr, headers...)
	if proj.Spec.SyncWindows.HasWindows() {
		for i, window := range proj.Spec.SyncWindows {
@@ -327,7 +300,6 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
				formatListOutput(window.Namespaces),
				formatListOutput(window.Clusters),
				formatManualOutput(window.ManualSync),
				window.TimeZone,
			}
			fmt.Fprintf(w, fmtStr, vals...)
		}
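One side of the hunk above sizes the tabwriter format string from the header slice, so adding a column such as TIMEZONE only touches one line. A small, self-contained sketch of that pattern with made-up row data:

```go
// Sketch of the printSyncWindows tabwriter pattern: build the row format
// string from the header count so columns stay in sync. Row values are fake.
package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "TIMEZONE"}
	fmtStr := strings.Repeat("%s\t", len(headers)) + "\n"
	fmt.Fprintf(w, fmtStr, headers...)
	fmt.Fprintf(w, fmtStr, "0", "Active", "allow", "0 22 * * *", "1h", "UTC")
	w.Flush() // flushing aligns the columns
}
```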
@@ -84,18 +84,6 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
			errors.CheckError(err)
			fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
		},
		Example: `
# Reinitiate the login with the previous context
argocd relogin

# Reinitiate the login with a password
argocd relogin --password YOUR_PASSWORD

# Configure direct access using the Kubernetes API server
argocd login cd.argoproj.io --core

# If the user logged in via SSO with "argocd login cd.argoproj.io",
# "argocd relogin" reinitiates the SSO login and updates the server context`,
	}
	command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
	command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")
@@ -29,19 +29,6 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
},
|
||||
Example: `
|
||||
# Add git repository connection parameters
|
||||
argocd repo add git@git.example.com:repos/repo
|
||||
|
||||
# Get a Configured Repository by URL
|
||||
argocd repo get https://github.com/yourusername/your-repo.git
|
||||
|
||||
# List Configured Repositories
|
||||
argocd repo list
|
||||
|
||||
# Remove Repository Credentials
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git
|
||||
`,
|
||||
}
|
||||
|
||||
command.AddCommand(NewRepoAddCommand(clientOpts))
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
	"github.com/argoproj/argo-cd/v2/util/errors"
	"github.com/argoproj/argo-cd/v2/util/git"
	"github.com/argoproj/argo-cd/v2/util/io"
	"github.com/argoproj/argo-cd/v2/util/templates"
)

// NewRepoCredsCommand returns a new instance of an `argocd repocreds` command
@@ -25,16 +24,6 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
	var command = &cobra.Command{
		Use:   "repocreds",
		Short: "Manage repository connection parameters",
		Example: templates.Examples(`
			# Add credentials with user/pass authentication to use for all repositories under the specified URL
			argocd repocreds add URL --username USERNAME --password PASSWORD

			# List all the configured repository credentials
			argocd repocreds list

			# Remove credentials for the repositories with the specified URL
			argocd repocreds rm URL
		`),
		Run: func(c *cobra.Command, args []string) {
			c.HelpFunc()(c, args)
			os.Exit(1)
@@ -195,10 +184,6 @@ func NewRepoCredsRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
var command = &cobra.Command{
|
||||
Use: "rm CREDSURL",
|
||||
Short: "Remove repository credentials",
|
||||
Example: templates.Examples(`
|
||||
# Remove credentials for the repositories with URL https://git.example.com/repos
|
||||
argocd repocreds rm https://git.example.com/repos/
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -246,19 +231,6 @@ func NewRepoCredsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
var command = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List configured repository credentials",
|
||||
Example: templates.Examples(`
|
||||
# List all repo urls
|
||||
argocd repocreds list
|
||||
|
||||
# List all repo urls in json format
|
||||
argocd repocreds list -o json
|
||||
|
||||
# List all repo urls in yaml format
|
||||
argocd repocreds list -o yaml
|
||||
|
||||
# List all repo urls in url format
|
||||
argocd repocreds list -o url
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -295,7 +295,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
			Backoff: &argoappv1.Backoff{
				Duration:    appOpts.retryBackoffDuration.String(),
				MaxDuration: appOpts.retryBackoffMaxDuration.String(),
				Factor:      pointer.Int64(appOpts.retryBackoffFactor),
				Factor:      pointer.Int64Ptr(appOpts.retryBackoffFactor),
			},
		}
	} else if appOpts.retryLimit == 0 {

@@ -115,7 +115,7 @@ func GetOrphanedResourcesSettings(flagSet *pflag.FlagSet, opts ProjectOpts) *v1a
	if opts.orphanedResourcesEnabled || warnChanged {
		settings := v1alpha1.OrphanedResourcesMonitorSettings{}
		if warnChanged {
			settings.Warn = pointer.Bool(opts.orphanedResourcesWarn)
			settings.Warn = pointer.BoolPtr(opts.orphanedResourcesWarn)
		}
		return &settings
	}
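Both spellings in these hunks come from k8s.io/utils/pointer and behave identically; pointer.Int64Ptr and pointer.BoolPtr are the older aliases of pointer.Int64 and pointer.Bool. A tiny stand-in showing what the helpers do, written locally rather than by importing the package:

```go
// Local stand-ins for the k8s.io/utils/pointer helpers used above: each
// returns a pointer to a copy of its argument.
package main

import "fmt"

// int64Ptr mirrors pointer.Int64 / pointer.Int64Ptr.
func int64Ptr(v int64) *int64 { return &v }

// boolPtr mirrors pointer.Bool / pointer.BoolPtr.
func boolPtr(v bool) *bool { return &v }

func main() {
	factor := int64Ptr(2)
	warn := boolPtr(true)
	fmt.Println(*factor, *warn) // 2 true
}
```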
@@ -65,7 +65,7 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
		grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize),
		grpc.KeepaliveEnforcementPolicy(
			keepalive.EnforcementPolicy{
				MinTime: common.GetGRPCKeepAliveEnforcementMinimum(),
				MinTime: common.GRPCKeepAliveEnforcementMinimum,
			},
		),
	}
@@ -258,8 +258,6 @@ const (
	EnvRedisName = "ARGOCD_REDIS_NAME"
	// EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key.
	EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME"
	// EnvGRPCKeepAliveMin defines the GRPCKeepAliveEnforcementMinimum, used in the grpc.KeepaliveEnforcementPolicy. Expects a "Duration" format (e.g. 10s).
	EnvGRPCKeepAliveMin = "ARGOCD_GRPC_KEEP_ALIVE_MIN"
)

// Config Management Plugin related constants
@@ -353,25 +351,10 @@ const (

// gRPC settings
const (
	defaultGRPCKeepAliveEnforcementMinimum = 10 * time.Second
)

func GetGRPCKeepAliveEnforcementMinimum() time.Duration {
	if GRPCKeepAliveMinStr := os.Getenv(EnvGRPCKeepAliveMin); GRPCKeepAliveMinStr != "" {
		GRPCKeepAliveMin, err := time.ParseDuration(GRPCKeepAliveMinStr)
		if err != nil {
			logrus.Warnf("invalid env var value for %s: cannot parse: %s. Default value %s will be used.", EnvGRPCKeepAliveMin, err, defaultGRPCKeepAliveEnforcementMinimum)
			return defaultGRPCKeepAliveEnforcementMinimum
		}
		return GRPCKeepAliveMin
	}
	return defaultGRPCKeepAliveEnforcementMinimum
}

func GetGRPCKeepAliveTime() time.Duration {
	GRPCKeepAliveEnforcementMinimum = 10 * time.Second
	// GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors
	return 2 * GetGRPCKeepAliveEnforcementMinimum()
}
	GRPCKeepAliveTime = 2 * GRPCKeepAliveEnforcementMinimum
)

// Security severity logging
const (
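One side of this hunk reads the keepalive minimum from ARGOCD_GRPC_KEEP_ALIVE_MIN with a logged fallback to a compile-time default, the other uses fixed constants. The env-driven override is worth a short, hedged sketch; log.Printf stands in for logrus and the names are illustrative:

```go
// Sketch of an env-driven duration override with a safe default, as shown in
// the GetGRPCKeepAliveEnforcementMinimum lines above.
package main

import (
	"log"
	"os"
	"time"
)

const defaultKeepAliveMin = 10 * time.Second

// keepAliveEnforcementMinimum returns the env override when it parses as a
// Go duration (e.g. "15s"), otherwise it falls back to the default.
func keepAliveEnforcementMinimum() time.Duration {
	if raw := os.Getenv("ARGOCD_GRPC_KEEP_ALIVE_MIN"); raw != "" {
		d, err := time.ParseDuration(raw)
		if err != nil {
			log.Printf("invalid ARGOCD_GRPC_KEEP_ALIVE_MIN %q: %v; using %s", raw, err, defaultKeepAliveMin)
			return defaultKeepAliveMin
		}
		return d
	}
	return defaultKeepAliveMin
}

func main() {
	// Keepalive time stays at 2x the enforcement minimum so client pings
	// never arrive faster than the server allows.
	log.Println(2 * keepAliveEnforcementMinimum())
}
```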
@@ -1,46 +0,0 @@
package common

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// Test env var not set for EnvGRPCKeepAliveMin
func Test_GRPCKeepAliveMinNotSet(t *testing.T) {
	grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
	grpcKeepAliveExpectedMin := defaultGRPCKeepAliveEnforcementMinimum
	assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)

	grpcKeepAliveTime := GetGRPCKeepAliveTime()
	assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
}

// Test valid env var set for EnvGRPCKeepAliveMin
func Test_GRPCKeepAliveMinIsSet(t *testing.T) {
	numSeconds := 15
	os.Setenv(EnvGRPCKeepAliveMin, fmt.Sprintf("%ds", numSeconds))

	grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
	grpcKeepAliveExpectedMin := time.Duration(numSeconds) * time.Second
	assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)

	grpcKeepAliveTime := GetGRPCKeepAliveTime()
	assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
}

// Test invalid env var set for EnvGRPCKeepAliveMin
func Test_GRPCKeepAliveMinIncorrectlySet(t *testing.T) {
	numSeconds := 15
	os.Setenv(EnvGRPCKeepAliveMin, fmt.Sprintf("%d", numSeconds))

	grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
	grpcKeepAliveExpectedMin := defaultGRPCKeepAliveEnforcementMinimum
	assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)

	grpcKeepAliveTime := GetGRPCKeepAliveTime()
	assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
}
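A side note on the tests in this file: they set the environment variable with os.Setenv and never restore it, so the value can leak into later tests in the package. A hypothetical reshaping, not part of this diff, using t.Setenv (Go 1.17+), which undoes the change when the test finishes:

```go
// Hypothetical alternative to the env-var tests above; assumes the same
// GetGRPCKeepAliveEnforcementMinimum / GetGRPCKeepAliveTime functions exist.
package common

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func Test_GRPCKeepAliveMinIsSet_Scoped(t *testing.T) {
	t.Setenv(EnvGRPCKeepAliveMin, "15s") // automatically restored after the test

	assert.Equal(t, 15*time.Second, GetGRPCKeepAliveEnforcementMinimum())
	assert.Equal(t, 30*time.Second, GetGRPCKeepAliveTime())
}
```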
@@ -15,7 +15,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
|
||||
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
|
||||
"github.com/argoproj/gitops-engine/pkg/diff"
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
@@ -57,8 +56,6 @@ import (
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
@@ -66,6 +63,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/helm"
|
||||
logutils "github.com/argoproj/argo-cd/v2/util/log"
|
||||
settings_util "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -150,14 +148,9 @@ func NewApplicationController(
	persistResourceHealth bool,
	clusterFilter func(cluster *appv1.Cluster) bool,
	applicationNamespaces []string,
	rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
) (*ApplicationController, error) {
	log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod)
	db := db.NewDB(namespace, settingsMgr, kubeClientset)
	if rateLimiterConfig == nil {
		rateLimiterConfig = ratelimiter.GetDefaultAppRateLimiterConfig()
		log.Info("Using default workqueue rate limiter config")
	}
	ctrl := ApplicationController{
		cache:     argoCache,
		namespace: namespace,
@@ -165,10 +158,10 @@ func NewApplicationController(
		kubectl:              kubectl,
		applicationClientset: applicationClientset,
		repoClientset:        repoClientset,
		appRefreshQueue:               workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
		appOperationQueue:             workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
		projectRefreshQueue:           workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
		appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
		appRefreshQueue:               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_reconciliation_queue"),
		appOperationQueue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_operation_processing_queue"),
		projectRefreshQueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "project_reconciliation_queue"),
		appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		db:                       db,
		statusRefreshTimeout:     appResyncPeriod,
		statusHardRefreshTimeout: appHardResyncPeriod,
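This hunk switches the controller's work queues between a custom app-controller rate limiter and client-go's workqueue.DefaultControllerRateLimiter(). The default is a max-of composite of a per-item exponential backoff and a global token bucket; building it explicitly makes the tuning knobs visible. A sketch using the client-go default values:

```go
// Explicit construction of what workqueue.DefaultControllerRateLimiter()
// returns, wired to a named queue as in the hunk above.
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	limiter := workqueue.NewMaxOfRateLimiter(
		// Per-item exponential backoff: 5ms doubling up to 1000s.
		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
		// Overall token bucket: 10 qps with a burst of 100.
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	)
	q := workqueue.NewNamedRateLimitingQueue(limiter, "app_reconciliation_queue")
	q.AddRateLimited("default/my-app") // enqueued after the limiter's delay
	fmt.Println("queue length:", q.Len()) // likely 0 until the backoff elapses
}
```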
@@ -188,11 +181,10 @@ func NewApplicationController(
|
||||
appInformer, appLister := ctrl.newApplicationInformerAndLister()
|
||||
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
|
||||
projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, indexers)
|
||||
var err error
|
||||
_, err = projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
|
||||
ctrl.projectRefreshQueue.AddRateLimited(key)
|
||||
ctrl.projectRefreshQueue.Add(key)
|
||||
if projMeta, ok := obj.(metav1.Object); ok {
|
||||
ctrl.InvalidateProjectsCache(projMeta.GetName())
|
||||
}
|
||||
@@ -201,7 +193,7 @@ func NewApplicationController(
|
||||
},
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
if key, err := cache.MetaNamespaceKeyFunc(new); err == nil {
|
||||
ctrl.projectRefreshQueue.AddRateLimited(key)
|
||||
ctrl.projectRefreshQueue.Add(key)
|
||||
if projMeta, ok := new.(metav1.Object); ok {
|
||||
ctrl.InvalidateProjectsCache(projMeta.GetName())
|
||||
}
|
||||
@@ -209,7 +201,6 @@ func NewApplicationController(
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
|
||||
// immediately push to queue for deletes
|
||||
ctrl.projectRefreshQueue.Add(key)
|
||||
if projMeta, ok := obj.(metav1.Object); ok {
|
||||
ctrl.InvalidateProjectsCache(projMeta.GetName())
|
||||
@@ -217,9 +208,6 @@ func NewApplicationController(
|
||||
}
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
|
||||
deploymentInformer := factory.Apps().V1().Deployments()
|
||||
@@ -247,7 +235,7 @@ func NewApplicationController(
|
||||
}
|
||||
|
||||
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
|
||||
|
||||
var err error
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -823,8 +811,8 @@ func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith
|
||||
ctrl.appRefreshQueue.AddAfter(key, *after)
|
||||
ctrl.appOperationQueue.AddAfter(key, *after)
|
||||
} else {
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appOperationQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
ctrl.appOperationQueue.Add(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1331,7 +1319,8 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
	}

	kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
		_, err := ctrl.PatchAppWithWriteBack(context.Background(), app.Name, app.Namespace, types.MergePatchType, patchJSON, metav1.PatchOptions{})
		appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
		_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
		if err != nil {
			// Stop retrying updating deleted application
			if apierr.IsNotFound(err) {
@@ -1369,27 +1358,6 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
	}
}

// writeBackToInformer writes a just recently updated App back into the informer cache.
// This prevents the situation where the controller operates on a stale app and repeats work
func (ctrl *ApplicationController) writeBackToInformer(app *appv1.Application) {
	logCtx := log.WithFields(log.Fields{"application": app.Name, "appNamespace": app.Namespace, "project": app.Spec.Project, "informer-writeBack": true})
	err := ctrl.appInformer.GetStore().Update(app)
	if err != nil {
		logCtx.Errorf("failed to update informer store: %v", err)
		return
	}
}

// PatchAppWithWriteBack patches an application and writes it back to the informer cache
func (ctrl *ApplicationController) PatchAppWithWriteBack(ctx context.Context, name, ns string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appv1.Application, err error) {
	patchedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ns).Patch(ctx, name, pt, data, opts, subresources...)
	if err != nil {
		return patchedApp, err
	}
	ctrl.writeBackToInformer(patchedApp)
	return patchedApp, err
}

func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
	patchMs := time.Duration(0) // time spent in doing patch/update calls
	setOpMs := time.Duration(0) // time spent in doing Operation patch calls in autosync
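The PatchAppWithWriteBack and writeBackToInformer pair shown in this hunk exists so that, after a successful patch, the informer's local store holds the fresh object and the next queue item does not reconcile a stale copy. A simplified, hypothetical illustration of that write-back step against a plain client-go store:

```go
// Illustration of the write-back idea behind PatchAppWithWriteBack: after a
// successful server-side update, refresh the informer's cached copy so later
// workers see the new resource version. Types are simplified stand-ins.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type app struct {
	Namespace, Name, ResourceVersion string
}

func keyFunc(obj interface{}) (string, error) {
	a := obj.(*app)
	return a.Namespace + "/" + a.Name, nil
}

func main() {
	store := cache.NewStore(keyFunc)
	_ = store.Add(&app{"argocd", "guestbook", "1"})

	// Pretend the API server just returned the patched object...
	patched := &app{"argocd", "guestbook", "2"}
	// ...and write it back so the cache is no longer one version behind.
	if err := store.Update(patched); err != nil {
		fmt.Println("failed to update informer store:", err)
		return
	}
	obj, _, _ := store.GetByKey("argocd/guestbook")
	fmt.Println(obj.(*app).ResourceVersion) // 2
}
```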
@@ -1426,22 +1394,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
return
|
||||
}
|
||||
app := origApp.DeepCopy()
|
||||
logCtx := log.WithFields(log.Fields{
|
||||
"application": app.QualifiedName(),
|
||||
"level": comparisonLevel,
|
||||
"dest-server": origApp.Spec.Destination.Server,
|
||||
"dest-name": origApp.Spec.Destination.Name,
|
||||
"dest-namespace": origApp.Spec.Destination.Namespace,
|
||||
})
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
reconcileDuration := time.Since(startTime)
|
||||
ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
|
||||
logCtx.WithFields(log.Fields{
|
||||
"time_ms": reconcileDuration.Milliseconds(),
|
||||
"patch_ms": patchMs.Milliseconds(),
|
||||
"setop_ms": setOpMs.Milliseconds(),
|
||||
"time_ms": reconcileDuration.Milliseconds(),
|
||||
"patch_ms": patchMs.Milliseconds(),
|
||||
"setop_ms": setOpMs.Milliseconds(),
|
||||
"level": comparisonLevel,
|
||||
"dest-server": origApp.Spec.Destination.Server,
|
||||
"dest-name": origApp.Spec.Destination.Name,
|
||||
"dest-namespace": origApp.Spec.Destination.Namespace,
|
||||
}).Info("Reconciliation completed")
|
||||
}()
|
||||
|
||||
@@ -1601,7 +1567,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
|
||||
} else if hardExpired || softExpired {
|
||||
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
|
||||
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
|
||||
// TODO: find existing Golang bug or create a new one
|
||||
//TODO: find existing Golang bug or create a new one
|
||||
reconciledAtStr := "never"
|
||||
if app.Status.ReconciledAt != nil {
|
||||
reconciledAtStr = app.Status.ReconciledAt.String()
|
||||
@@ -1663,7 +1629,8 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica
|
||||
if err != nil {
|
||||
logCtx.Errorf("error constructing app spec patch: %v", err)
|
||||
} else if modified {
|
||||
_, err := ctrl.PatchAppWithWriteBack(context.Background(), app.Name, app.Namespace, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
logCtx.Errorf("Error persisting normalized application spec: %v", err)
|
||||
} else {
|
||||
@@ -1707,7 +1674,8 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
defer func() {
|
||||
patchMs = time.Since(start)
|
||||
}()
|
||||
_, err = ctrl.PatchAppWithWriteBack(context.Background(), orig.Name, orig.Namespace, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
logCtx.Warnf("Error updating application: %v", err)
|
||||
} else {
|
||||
@@ -1817,7 +1785,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
|
||||
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
start := time.Now()
|
||||
updatedApp, err := argo.SetAppOperation(appIf, app.Name, &op)
|
||||
_, err := argo.SetAppOperation(appIf, app.Name, &op)
|
||||
setOpTime := time.Since(start)
|
||||
if err != nil {
|
||||
if goerrors.Is(err, argo.ErrAnotherOperationInProgress) {
|
||||
@@ -1829,8 +1797,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
|
||||
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
|
||||
} else {
|
||||
ctrl.writeBackToInformer(updatedApp)
|
||||
}
|
||||
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
|
||||
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "")
|
||||
@@ -2018,7 +1984,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
},
|
||||
)
|
||||
lister := applisters.NewApplicationLister(informer.GetIndexer())
|
||||
_, err := informer.AddEventHandler(
|
||||
informer.AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
if !ctrl.canProcessApp(obj) {
|
||||
@@ -2026,8 +1992,8 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
}
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appOperationQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
ctrl.appOperationQueue.Add(key)
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
@@ -2047,7 +2013,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
compareWith = CompareWithLatest.Pointer()
|
||||
}
|
||||
ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, nil)
|
||||
ctrl.appOperationQueue.AddRateLimited(key)
|
||||
ctrl.appOperationQueue.Add(key)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
if !ctrl.canProcessApp(obj) {
|
||||
@@ -2057,15 +2023,11 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
// key function.
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
// for deletes, we immediately add to the refresh queue
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
return informer, lister
|
||||
}
|
||||
|
||||
|
||||
@@ -123,7 +123,6 @@ func newFakeController(data *fakeData) *ApplicationController {
|
||||
true,
|
||||
nil,
|
||||
data.applicationNamespaces,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -625,7 +624,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
|
||||
})
|
||||
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patched = true
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
_, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
|
||||
return []*v1alpha1.Cluster{}, nil
|
||||
@@ -675,7 +674,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
|
||||
})
|
||||
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patched = true
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
objs, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
|
||||
return []*v1alpha1.Cluster{}, nil
|
||||
@@ -709,7 +708,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
|
||||
})
|
||||
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patched = true
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
_, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
|
||||
return []*v1alpha1.Cluster{}, nil
|
||||
@@ -794,7 +793,7 @@ func TestNormalizeApplication(t *testing.T) {
|
||||
// Verify we normalize the app because project is missing
|
||||
ctrl := newFakeController(&data)
|
||||
key, _ := cache.MetaNamespaceKeyFunc(app)
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
fakeAppCs.ReactionChain = nil
|
||||
normalized := false
|
||||
@@ -804,7 +803,7 @@ func TestNormalizeApplication(t *testing.T) {
|
||||
normalized = true
|
||||
}
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
assert.True(t, normalized)
|
||||
@@ -816,7 +815,7 @@ func TestNormalizeApplication(t *testing.T) {
|
||||
data.apps[0] = app
|
||||
ctrl := newFakeController(&data)
|
||||
key, _ := cache.MetaNamespaceKeyFunc(app)
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
fakeAppCs.ReactionChain = nil
|
||||
normalized := false
|
||||
@@ -826,7 +825,7 @@ func TestNormalizeApplication(t *testing.T) {
|
||||
normalized = true
|
||||
}
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
assert.False(t, normalized)
|
||||
@@ -923,7 +922,7 @@ func TestSetOperationStateOnDeletedApp(t *testing.T) {
|
||||
patched := false
|
||||
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patched = true
|
||||
return true, &v1alpha1.Application{}, apierr.NewNotFound(schema.GroupResource{}, "my-app")
|
||||
return true, nil, apierr.NewNotFound(schema.GroupResource{}, "my-app")
|
||||
})
|
||||
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
|
||||
assert.True(t, patched)
|
||||
@@ -955,9 +954,9 @@ func TestSetOperationStateLogRetries(t *testing.T) {
|
||||
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if !patched {
|
||||
patched = true
|
||||
return true, &v1alpha1.Application{}, errors.New("fake error")
|
||||
return true, nil, errors.New("fake error")
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
|
||||
assert.True(t, patched)
|
||||
@@ -1273,13 +1272,13 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
t.Run("UpdatedOnFullReconciliation", func(t *testing.T) {
|
||||
receivedPatch = map[string]interface{}{}
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), nil)
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
|
||||
@@ -1294,7 +1293,7 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
|
||||
t.Run("NotUpdatedOnPartialReconciliation", func(t *testing.T) {
|
||||
receivedPatch = map[string]interface{}{}
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
@@ -1324,7 +1323,7 @@ func TestProjectErrorToCondition(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
})
|
||||
key, _ := cache.MetaNamespaceKeyFunc(app)
|
||||
ctrl.appRefreshQueue.AddRateLimited(key)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
@@ -1347,7 +1346,7 @@ func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
|
||||
patched := false
|
||||
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patched = true
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
err := ctrl.finalizeProjectDeletion(proj)
|
||||
@@ -1365,7 +1364,7 @@ func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.AppProject{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
err := ctrl.finalizeProjectDeletion(proj)
|
||||
@@ -1390,7 +1389,7 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
@@ -1418,7 +1417,7 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
}()
|
||||
|
||||
@@ -1444,7 +1443,7 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
@@ -1487,7 +1486,7 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
@@ -1520,7 +1519,7 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
return true, nil, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
78
controller/cache/cache_test.go
vendored
@@ -120,7 +120,7 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset()
|
||||
settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
|
||||
liveStateCacheLock := sync.RWMutex{}
|
||||
externalLockRef := sync.RWMutex{}
|
||||
gitopsEngineClusterCache := &mocks.ClusterCache{}
|
||||
clustersCache := liveStateCache{
|
||||
clusters: map[string]cache.ClusterCache{
|
||||
@@ -132,14 +132,11 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
|
||||
settingsMgr: settingsMgr,
|
||||
// Set the lock here so we can reference it later
|
||||
// nolint We need to overwrite here to have access to the lock
|
||||
lock: liveStateCacheLock,
|
||||
lock: externalLockRef,
|
||||
}
|
||||
channel := make(chan string)
|
||||
// Mocked lock held by the gitops-engine cluster cache
|
||||
gitopsEngineClusterCacheLock := sync.Mutex{}
|
||||
// Ensure completion of both EnsureSynced and Invalidate
|
||||
ensureSyncedCompleted := sync.Mutex{}
|
||||
invalidateCompleted := sync.Mutex{}
|
||||
mockMutex := sync.RWMutex{}
|
||||
// Locks to force trigger condition during test
|
||||
// Condition order:
|
||||
// EnsuredSynced -> Locks gitops-engine
|
||||
@@ -147,39 +144,40 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
|
||||
// EnsureSynced via sync, newResource, populateResourceInfoHandler -> attempts to Lock liveStateCache
|
||||
// handleDeleteEvent via cluster.Invalidate -> attempts to Lock gitops-engine
|
||||
handleDeleteWasCalled := sync.Mutex{}
|
||||
engineHoldsEngineLock := sync.Mutex{}
|
||||
ensureSyncedCompleted.Lock()
|
||||
invalidateCompleted.Lock()
|
||||
engineHoldsLock := sync.Mutex{}
|
||||
handleDeleteWasCalled.Lock()
|
||||
engineHoldsEngineLock.Lock()
|
||||
|
||||
engineHoldsLock.Lock()
|
||||
gitopsEngineClusterCache.On("EnsureSynced").Run(func(args mock.Arguments) {
|
||||
gitopsEngineClusterCacheLock.Lock()
|
||||
t.Log("EnsureSynced: Engine has engine lock")
|
||||
engineHoldsEngineLock.Unlock()
|
||||
defer gitopsEngineClusterCacheLock.Unlock()
|
||||
// Wait until handleDeleteEvent holds the liveStateCache lock
|
||||
// Held by EnsureSync calling into sync and watchEvents
|
||||
mockMutex.Lock()
|
||||
defer mockMutex.Unlock()
|
||||
// Continue Execution of timer func
|
||||
engineHoldsLock.Unlock()
|
||||
// Wait for handleDeleteEvent to be called triggering the lock
|
||||
// on the liveStateCache
|
||||
handleDeleteWasCalled.Lock()
|
||||
// Try and obtain the liveStateCache lock
|
||||
clustersCache.lock.Lock()
|
||||
t.Log("EnsureSynced: Engine has LiveStateCache lock")
|
||||
clustersCache.lock.Unlock()
|
||||
ensureSyncedCompleted.Unlock()
|
||||
}).Return(nil).Once()
|
||||
|
||||
gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) {
|
||||
// Allow EnsureSynced to continue now that we're in the deadlock condition
|
||||
t.Logf("handleDelete was called, EnsureSynced continuing...")
|
||||
handleDeleteWasCalled.Unlock()
|
||||
// Wait until gitops engine holds the gitops lock
|
||||
// This prevents timing issues if we reach this point before EnsureSynced has obtained the lock
|
||||
engineHoldsEngineLock.Lock()
|
||||
t.Log("Invalidate: Engine has engine lock")
|
||||
engineHoldsEngineLock.Unlock()
|
||||
// Lock engine lock
|
||||
gitopsEngineClusterCacheLock.Lock()
|
||||
t.Log("Invalidate: Invalidate has engine lock")
|
||||
gitopsEngineClusterCacheLock.Unlock()
|
||||
invalidateCompleted.Unlock()
|
||||
// Try and obtain the lock on the liveStateCache
|
||||
alreadyFailed := !externalLockRef.TryLock()
|
||||
if alreadyFailed {
|
||||
channel <- "DEADLOCKED -- EnsureSynced could not obtain lock on liveStateCache"
|
||||
return
|
||||
}
|
||||
externalLockRef.Lock()
|
||||
t.Logf("EnsureSynce was able to lock liveStateCache")
|
||||
externalLockRef.Unlock()
|
||||
}).Return(nil).Once()
|
||||
gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) {
|
||||
// If deadlock is fixed should be able to acquire lock here
|
||||
alreadyFailed := !mockMutex.TryLock()
|
||||
if alreadyFailed {
|
||||
channel <- "DEADLOCKED -- Invalidate could not obtain lock on gitops-engine"
|
||||
return
|
||||
}
|
||||
mockMutex.Lock()
|
||||
t.Logf("Invalidate was able to lock gitops-engine cache")
|
||||
mockMutex.Unlock()
|
||||
}).Return()
|
||||
go func() {
|
||||
// Start the gitops-engine lock holds
|
||||
@@ -189,14 +187,14 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
|
||||
assert.Fail(t, err.Error())
|
||||
}
|
||||
}()
|
||||
// Wait for EnsureSynced to grab the lock for gitops-engine
|
||||
engineHoldsLock.Lock()
|
||||
t.Log("EnsureSynced has obtained lock on gitops-engine")
|
||||
engineHoldsLock.Unlock()
|
||||
// Run in background
|
||||
go clustersCache.handleDeleteEvent(testCluster.Server)
|
||||
// Allow execution to continue on clusters cache call to trigger lock
|
||||
ensureSyncedCompleted.Lock()
|
||||
invalidateCompleted.Lock()
|
||||
t.Log("Competing functions were able to obtain locks")
|
||||
invalidateCompleted.Unlock()
|
||||
ensureSyncedCompleted.Unlock()
|
||||
handleDeleteWasCalled.Unlock()
|
||||
channel <- "PASSED"
|
||||
}()
|
||||
select {
|
||||
|
||||
42
controller/cache/info.go
vendored
@@ -37,16 +37,6 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range un.GetAnnotations() {
|
||||
if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) {
|
||||
if res.NetworkingInfo == nil {
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{}
|
||||
}
|
||||
res.NetworkingInfo.ExternalURLs = append(res.NetworkingInfo.ExternalURLs, v)
|
||||
}
|
||||
}
|
||||
|
||||
switch gvk.Group {
|
||||
case "":
|
||||
switch gvk.Kind {
|
||||
@@ -68,6 +58,15 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
|
||||
populateIstioVirtualServiceInfo(un, res)
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range un.GetAnnotations() {
|
||||
if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) {
|
||||
if res.NetworkingInfo == nil {
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{}
|
||||
}
|
||||
res.NetworkingInfo.ExternalURLs = append(res.NetworkingInfo.ExternalURLs, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getIngress(un *unstructured.Unstructured) []v1.LoadBalancerIngress {
|
||||
@@ -94,13 +93,7 @@ func populateServiceInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
if serviceType, ok, err := unstructured.NestedString(un.Object, "spec", "type"); ok && err == nil && serviceType == string(v1.ServiceTypeLoadBalancer) {
|
||||
ingress = getIngress(un)
|
||||
}
|
||||
|
||||
var urls []string
|
||||
if res.NetworkingInfo != nil {
|
||||
urls = res.NetworkingInfo.ExternalURLs
|
||||
}
|
||||
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress, ExternalURLs: urls}
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress}
|
||||
}
|
||||
|
||||
func getServiceName(backend map[string]interface{}, gvk schema.GroupVersionKind) (string, error) {
|
||||
@@ -270,12 +263,7 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
|
||||
targets = append(targets, target)
|
||||
}
|
||||
|
||||
var urls []string
|
||||
if res.NetworkingInfo != nil {
|
||||
urls = res.NetworkingInfo.ExternalURLs
|
||||
}
|
||||
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets}
|
||||
}
|
||||
|
||||
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
@@ -386,13 +374,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
if restarts > 0 {
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Restart Count", Value: fmt.Sprintf("%d", restarts)})
|
||||
}
|
||||
|
||||
var urls []string
|
||||
if res.NetworkingInfo != nil {
|
||||
urls = res.NetworkingInfo.ExternalURLs
|
||||
}
|
||||
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels(), ExternalURLs: urls}
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels()}
|
||||
}
|
||||
|
||||
func populateHostNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
|
||||
2
controller/cache/info_test.go
vendored
@@ -406,7 +406,7 @@ func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
ExternalURLs: []string{"http://my-grafana.com/ingress-link", "https://helm-guestbook.com/"},
ExternalURLs: []string{"https://helm-guestbook.com/", "http://my-grafana.com/ingress-link"},
}, info.NetworkingInfo)
}

@@ -391,7 +391,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
now := metav1.Now()
|
||||
|
||||
var manifestInfos []*apiclient.ManifestResponse
|
||||
targetNsExists := false
|
||||
|
||||
if len(localManifests) == 0 {
|
||||
// If the length of revisions is not same as the length of sources,
|
||||
@@ -454,13 +453,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
LastTransitionTime: &now,
|
||||
})
|
||||
}
|
||||
|
||||
// If we reach this path, this means that a namespace has been both defined in Git, as well in the
|
||||
// application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead
|
||||
// of what is present in managedNamespaceMetadata.
|
||||
if isManagedNamespace(targetObj, app) {
|
||||
targetNsExists = true
|
||||
}
|
||||
}
|
||||
ts.AddCheckpoint("dedup_ms")
|
||||
|
||||
@@ -519,10 +511,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a
|
||||
// namespace which we can compare the live namespace with. For that, we'll do the same as is done in
|
||||
// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.
|
||||
//
|
||||
// targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the
|
||||
// targetObjs array.
|
||||
if isManagedNamespace(liveObj, app) && !targetNsExists {
|
||||
if isManagedNamespace(liveObj, app) {
|
||||
nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}
|
||||
managedNs, err := kubeutil.ToUnstructured(nsSpec)
|
||||
|
@@ -24,9 +24,10 @@ $ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKE

#### How to Avoid 403 Errors for Missing Applications

All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter
is specified, and the specified Application does not exist, the API will return a `404` error.
All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter is
specified, and the specified Application does not exist, or if the Application does exist but is not in the given
project, the API will return a `404` error.

Additionally, if the `project` query string parameter is specified and the Application exists but is not in
the given `project`, the API will return a `403` error. This is to prevent leaking information about the
existence of Applications to users who do not have access to them.
If the `project` query string parameter is specified, and the Application does not exist, the API will return a `403`
error. This is to prevent leaking information about the existence of Applications to users who do not have access to
them.

@@ -66,7 +66,7 @@ make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0
## Public CD

Every commit to master is built and published to `ghcr.io/argoproj/argo-cd/argocd:<version>-<short-sha>`. The list of images is available at
[https://github.com/argoproj/argo-cd/packages](https://github.com/argoproj/argo-cd/packages).
https://github.com/argoproj/argo-cd/packages.

!!! note
GitHub docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read

@@ -36,7 +36,7 @@ registerResourceExtension(component: ExtensionComponent, group: string, kind: st
- `component: ExtensionComponent` is a React component that receives the following properties:

- application: Application - Argo CD Application resource;
- resource: State - the Kubernetes resource object;
- resource: State - the kubernetes resource object;
- tree: ApplicationTree - includes list of all resources that comprise the application;

See properties interfaces in [models.ts](https://github.com/argoproj/argo-cd/blob/master/ui/src/app/shared/models.ts)

@@ -2,14 +2,13 @@

## Developing And Testing

The website is built using `mkdocs` and `mkdocs-material`.
The website is build using `mkdocs` and `mkdocs-material`.

To test:

```bash
make serve-docs
```

Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).

## Deploying

@@ -205,11 +205,10 @@ you should edit your `~/.kube/config` and modify the `server` option to point to
4. Finally, so that you don't have to keep updating your kube-config whenever you spin up a new k3d cluster, add `--api-port $IP:6550` to your **k3d cluster create** command, where $IP is the value from step 1. An example command is provided here:

```
k3d cluster create my-cluster --wait --k3s-arg '--disable=traefik@server:*' --api-port $IP:6550 -p 443:443@loadbalancer
k3d cluster create my-cluster --wait --k3s-server-arg '--disable=traefik' --api-port $IP:6550 -p 443:443@loadbalancer
```

!!!note
For k3d versions less than v5.0.0, the example command flags `--k3s-arg` and `'--disable=traefik@server:*'` should change to `--k3s-server-arg` and `'--disable=traefik'`, respectively.
Starting from k3d v5.0.0 the example command flags `--k3s-server-arg` and `'--disable=traefik'` would have to be changed to `--k3s-arg` and `'--disable=traefik@server:*'`, respectively.

## The development cycle

@@ -53,7 +53,7 @@ meeting:

![argocd-architecture](assets/argocd-architecture.png)

Argo CD is implemented as a Kubernetes controller which continuously monitors running applications
Argo CD is implemented as a kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the Git repo).
A deployed application whose live state deviates from the target state is considered `OutOfSync`.
Argo CD reports & visualizes the differences, while providing facilities to automatically or

@@ -20,7 +20,7 @@ Some manual steps will need to be performed by the Argo CD administrator in orde

### Cluster-scoped Argo CD installation

This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will not work with an Argo CD installed in namespace-scoped mode.
This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will *not* work with an Argo CD installed in namespace-scoped mode.

### Switch resource tracking method

@@ -0,0 +1,9 @@
# Add support for self-signed TLS / Certificates for Gitlab SCM/PR Provider

## Implementation details

### Overview

In order for a self-signed TLS certificate be used by an ApplicationSet's SCM / PR Gitlab Generator, the certificate needs to be mounted on the application-controller. The path of the mounted certificate must be explicitly set using the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH` or alternatively using parameter `--scm-root-ca-path`. The applicationset controller will read the mounted certificate to create the Gitlab client for SCM/PR Providers

This can be achieved conveniently by setting `applicationsetcontroller.scm.root.ca.path` in the argocd-cmd-params-cm ConfigMap. Be sure to restart the ApplicationSet controller after setting this value.
@@ -35,8 +35,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: myapps
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- scmProvider:
|
||||
gitea:
|
||||
@@ -55,6 +53,7 @@ spec:
|
||||
|
||||
Therefore administrator must restrict the urls of the allowed SCM Providers (example: `https://git.mydomain.com/,https://gitlab.mydomain.com/`) by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.allowed.scm.providers`. If another url is used, it will be rejected by the applicationset controller.
|
||||
|
||||
|
||||
For example:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -65,14 +64,7 @@ data:
|
||||
applicationsetcontroller.allowed.scm.providers: https://git.mydomain.com/,https://gitlab.mydomain.com/
|
||||
```
|
||||
|
||||
!!! note
|
||||
Please note url used in the `api` field of the `ApplicationSet` must match the url declared by the Administrator including the protocol
|
||||
|
||||
!!! warning
|
||||
The allow-list only applies to SCM providers for which the user may configure a custom `api`. Where an SCM or PR
|
||||
generator does not accept a custom API URL, the provider is implicitly allowed.
|
||||
|
||||
If you do not intend to allow users to use the SCM or PR generators, you can disable them entirely by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOW_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.allow.scm.providers` to `false`.
|
||||
> Please note url used in the `api` field of the `ApplicationSet` must match the url declared by the Administrator including the protocol
|
||||
|
||||
### Overview
|
||||
|
||||
@@ -139,19 +131,17 @@ metadata:
|
||||
name: team-one-product-one
|
||||
namespace: team-one-cd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
list:
|
||||
- name: infra
|
||||
- id: infra
|
||||
project: infra-project
|
||||
- name: team-two
|
||||
- id: team-two
|
||||
project: team-two-project
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}-escalation'
|
||||
spec:
|
||||
project: "{{.project}}"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{name}}-escalation'
|
||||
spec:
|
||||
project: "{{project}}"
|
||||
```
|
||||
|
||||
### ApplicationSet names
|
||||
|
||||
@@ -6,7 +6,7 @@ These settings allow you to exert control over when, and how, changes are made t
|
||||
|
||||
Here are some of the controller settings that may be modified to alter the ApplicationSet controller's resource-handling behaviour.
|
||||
|
||||
## Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
|
||||
### Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
|
||||
|
||||
To prevent the ApplicationSet controller from creating, modifying, or deleting any `Application` resources, you may enable `dry-run` mode. This essentially switches the controller into a "read only" mode, where the controller Reconcile loop will run, but no resources will be modified.
|
||||
|
||||
@@ -14,7 +14,7 @@ To enable dry-run, add `--dryrun true` to the ApplicationSet Deployment's contai
|
||||
|
||||
See 'How to modify ApplicationSet container parameters' below for detailed steps on how to add this parameter to the controller.
|
||||
|
||||
## Managed Applications modification Policies
|
||||
### Managed Applications modification Policies
|
||||
|
||||
The ApplicationSet controller supports a parameter `--policy`, which is specified on launch (within the controller Deployment container), and which restricts what types of modifications will be made to managed Argo CD `Application` resources.
|
||||
|
||||
@@ -41,7 +41,7 @@ If the controller parameter `--policy` is set, it takes precedence on the field
|
||||
|
||||
This does not prevent deletion of Applications if the ApplicationSet is deleted
|
||||
|
||||
### Controller parameter
|
||||
#### Controller parameter
|
||||
|
||||
To allow the ApplicationSet controller to *create* `Application` resources, but prevent any further modification, such as deletion, or modification of Application fields, add this parameter in the ApplicationSet controller:
|
||||
```
|
||||
@@ -59,7 +59,7 @@ spec:
|
||||
applicationsSync: create-only
|
||||
```
|
||||
|
||||
## Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
|
||||
### Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
|
||||
|
||||
To allow the ApplicationSet controller to create or modify `Application` resources, but prevent Applications from being deleted, add the following parameter to the ApplicationSet controller `Deployment`:
|
||||
```
|
||||
@@ -79,7 +79,7 @@ spec:
|
||||
applicationsSync: create-update
|
||||
```
|
||||
|
||||
## Ignore certain changes to Applications
|
||||
### Ignore certain changes to Applications
|
||||
|
||||
The ApplicationSet spec includes an `ignoreApplicationDifferences` field, which allows you to specify which fields of
|
||||
the ApplicationSet should be ignored when comparing Applications.
|
||||
@@ -98,94 +98,11 @@ spec:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqPathExpressions:
|
||||
jqExpressions:
|
||||
- .spec.source.helm.values
|
||||
```
|
||||
|
||||
### Allow temporarily toggling auto-sync
|
||||
|
||||
One of the most common use cases for ignoring differences is to allow temporarily toggling auto-sync for an Application.
|
||||
|
||||
For example, if you have an ApplicationSet that is configured to automatically sync Applications, you may want to temporarily
|
||||
disable auto-sync for a specific Application. You can do this by adding an ignore rule for the `spec.syncPolicy.automated` field.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/syncPolicy
|
||||
```
|
||||
|
||||
### Limitations of `ignoreApplicationDifferences`
|
||||
|
||||
When an ApplicationSet is reconciled, the controller will compare the ApplicationSet spec with the spec of each Application
|
||||
that it manages. If there are any differences, the controller will generate a patch to update the Application to match the
|
||||
ApplicationSet spec.
|
||||
|
||||
The generated patch is a MergePatch. According to the MergePatch documentation, "existing lists will be completely
|
||||
replaced by new lists" when there is a change to the list.
|
||||
|
||||
This limits the effectiveness of `ignoreApplicationDifferences` when the ignored field is in a list. For example, if you
|
||||
have an application with multiple sources, and you want to ignore changes to the `targetRevision` of one of the sources,
|
||||
changes in other fields or in other sources will cause the entire `sources` list to be replaced, and the `targetRevision`
|
||||
field will be reset to the value defined in the ApplicationSet.
|
||||
|
||||
For example, consider this ApplicationSet:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jqPathExpressions:
|
||||
- .spec.sources[] | select(.repoURL == "https://git.example.com/org/repo1").targetRevision
|
||||
template:
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: main
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
You can freely change the `targetRevision` of the `repo1` source, and the ApplicationSet controller will not overwrite
|
||||
your change.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: fix/bug-123
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
However, if you change the `targetRevision` of the `repo2` source, the ApplicationSet controller will overwrite the entire
|
||||
`sources` field.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: main
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
!!! note
|
||||
[Future improvements](https://github.com/argoproj/argo-cd/issues/15975) to the ApplicationSet controller may
|
||||
eliminate this problem. For example, the `ref` field might be made a merge key, allowing the ApplicationSet
|
||||
controller to generate and use a StrategicMergePatch instead of a MergePatch. You could then target a specific
|
||||
source by `ref`, ignore changes to a field in that source, and changes to other sources would not cause the ignored
|
||||
field to be overwritten.
|
||||
|
||||
## Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
### Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
|
||||
By default, when an `Application` resource is deleted by the ApplicationSet controller, all of the child resources of the Application will be deleted as well (such as, all of the Application's `Deployments`, `Services`, etc).
|
||||
|
||||
@@ -202,7 +119,7 @@ spec:
|
||||
More information on the specific behaviour of `preserveResourcesOnDeletion`, and deletion in ApplicationSet controller and Argo CD in general, can be found on the [Application Deletion](Application-Deletion.md) page.
|
||||
|
||||
|
||||
## Prevent an Application's child resources from being modified
|
||||
### Prevent an Application's child resources from being modified
|
||||
|
||||
Changes made to the ApplicationSet will propagate to the Applications managed by the ApplicationSet, and then Argo CD will propagate the Application changes to the underlying cluster resources (as per [Argo CD Integration](Argo-CD-Integration.md)).
|
||||
|
||||
@@ -268,11 +185,6 @@ kubectl apply -n argocd -f install.yaml
|
||||
|
||||
## Preserving changes made to an Applications annotations and labels
|
||||
|
||||
!!! note
|
||||
The same behavior can be achieved on a per-app basis using the [`ignoreApplicationDifferences`](#ignore-certain-changes-to-applications)
|
||||
feature described above. However, preserved fields may be configured globally, a feature that is not yet available
|
||||
for `ignoreApplicationDifferences`.
|
||||
|
||||
It is common practice in Kubernetes to store state in annotations, operators will often make use of this. To allow for this, it is possible to configure a list of annotations that the ApplicationSet should preserve when reconciling.
|
||||
|
||||
For example, imagine that we have an Application created from an ApplicationSet, but a custom annotation and label has since been added (to the Application) that does not exist in the `ApplicationSet` resource:
|
||||
@@ -308,18 +220,3 @@ By default, the Argo CD notifications and the Argo CD refresh type annotations a
|
||||
!!!note
|
||||
One can also set global preserved fields for the controller by passing a comma separated list of annotations and labels to
|
||||
`ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS` and `ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS` respectively.
|
||||
|
||||
## Debugging unexpected changes to Applications
|
||||
|
||||
When the ApplicationSet controller makes a change to an application, it logs the patch at the debug level. To see these
|
||||
logs, set the log level to debug in the `argocd-cmd-params-cm` ConfigMap in the `argocd` namespace:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cmd-params-cm
|
||||
namespace: argocd
|
||||
data:
|
||||
applicationsetcontroller.log.level: debug
|
||||
```
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Cluster Decision Resource Generator
|
||||
|
||||
The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced Kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator:
|
||||
The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator:
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
@@ -8,8 +8,6 @@ metadata:
|
||||
name: guestbook
|
||||
namespace: argocd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusterDecisionResource:
|
||||
# ConfigMap with GVK information for the duck type resource
|
||||
@@ -28,7 +26,7 @@ spec:
|
||||
requeueAfterSeconds: 60
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}-guestbook'
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
@@ -36,7 +34,7 @@ spec:
|
||||
targetRevision: HEAD
|
||||
path: guestbook
|
||||
destination:
|
||||
server: '{{.clusterName}}' # 'server' field of the secret
|
||||
server: '{{clusterName}}' # 'server' field of the secret
|
||||
namespace: guestbook
|
||||
```
|
||||
The `quak` resource, referenced by the ApplicationSet `clusterDecisionResource` generator:
|
||||
|
||||
@@ -39,13 +39,11 @@ metadata:
|
||||
name: guestbook
|
||||
namespace: argocd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusters: {} # Automatically use all clusters defined within Argo CD
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}-guestbook' # 'name' field of the Secret
|
||||
name: '{{name}}-guestbook' # 'name' field of the Secret
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
@@ -53,7 +51,7 @@ spec:
|
||||
targetRevision: HEAD
|
||||
path: guestbook
|
||||
destination:
|
||||
server: '{{.server}}' # 'server' field of the secret
|
||||
server: '{{server}}' # 'server' field of the secret
|
||||
namespace: guestbook
|
||||
```
|
||||
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/cluster).*)
|
||||
@@ -69,8 +67,6 @@ metadata:
|
||||
name: guestbook
|
||||
namespace: argocd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
@@ -109,8 +105,6 @@ The cluster generator will automatically target both local and non-local cluster
|
||||
If you wish to target only remote clusters with your Applications (e.g. you want to exclude the local cluster), then use a cluster selector with labels, for example:
|
||||
```yaml
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
@@ -143,8 +137,6 @@ You may pass additional, arbitrary string key-value pairs via the `values` field
|
||||
In this example, a `revision` parameter value is passed, based on matching labels on the cluster secret:
|
||||
```yaml
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
@@ -162,16 +154,16 @@ spec:
|
||||
revision: stable
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}-guestbook'
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
# The cluster values field for each generator will be substituted here:
|
||||
targetRevision: '{{.values.revision}}'
|
||||
targetRevision: '{{values.revision}}'
|
||||
path: guestbook
|
||||
destination:
|
||||
server: '{{.server}}'
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
```
|
||||
|
||||
@@ -192,8 +184,6 @@ Extending the example above, we could do something like this:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
@@ -202,8 +192,8 @@ spec:
|
||||
# A key-value map for arbitrary parameters
|
||||
values:
|
||||
# If `my-custom-annotation` is in your cluster secret, `revision` will be substituted with it.
|
||||
revision: '{{index .metadata.annotations "my-custom-annotation"}}'
|
||||
clusterName: '{{.name}}'
|
||||
revision: '{{metadata.annotations.my-custom-annotation}}'
|
||||
clusterName: '{{name}}'
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
@@ -211,19 +201,19 @@ spec:
|
||||
values:
|
||||
# production uses a different revision value, for 'stable' branch
|
||||
revision: stable
|
||||
clusterName: '{{.name}}'
|
||||
clusterName: '{{name}}'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}-guestbook'
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
# The cluster values field for each generator will be substituted here:
|
||||
targetRevision: '{{.values.revision}}'
|
||||
targetRevision: '{{values.revision}}'
|
||||
path: guestbook
|
||||
destination:
|
||||
# In this case this is equivalent to just using {{name}}
|
||||
server: '{{.values.clusterName}}'
|
||||
server: '{{values.clusterName}}'
|
||||
namespace: guestbook
|
||||
```
|
||||
|
||||
@@ -210,8 +210,6 @@ metadata:
|
||||
name: cluster-addons
|
||||
namespace: argocd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/example/example-repo.git
|
||||
@@ -219,19 +217,19 @@ spec:
|
||||
directories:
|
||||
- path: '*'
|
||||
values:
|
||||
cluster: '{{.branch}}-{{.path.basename}}'
|
||||
cluster: '{{branch}}-{{path}}'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.path.basename}}'
|
||||
name: '{{path.basename}}'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/example/example-repo.git
|
||||
targetRevision: HEAD
|
||||
path: '{{.path.path}}'
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: '{{.values.cluster}}'
|
||||
namespace: '{{values.cluster}}'
|
||||
```
|
||||
|
||||
!!! note
|
||||
@@ -325,15 +323,15 @@ As with other generators, clusters *must* already be defined within Argo CD, in
|
||||
|
||||
In addition to the flattened key/value pairs from the configuration file, the following generator parameters are provided:
|
||||
|
||||
- `{{.path.path}}`: The path to the directory containing matching configuration file within the Git repository. Example: `/clusters/clusterA`, if the config file was `/clusters/clusterA/config.json`
|
||||
- `{{index .path n}}`: The path to the matching configuration file within the Git repository, split into array elements (`n` - array index). Example: `index .path 0: clusters`, `index .path 1: clusterA`
|
||||
- `{{.path.basename}}`: Basename of the path to the directory containing the configuration file (e.g. `clusterA`, with the above example.)
|
||||
- `{{.path.basenameNormalized}}`: This field is the same as `.path.basename` with unsupported characters replaced with `-` (e.g. a `path` of `/directory/directory_2`, and `.path.basename` of `directory_2` would produce `directory-2` here).
|
||||
- `{{.path.filename}}`: The matched filename. e.g., `config.json` in the above example.
|
||||
- `{{.path.filenameNormalized}}`: The matched filename with unsupported characters replaced with `-`.
|
||||
- `{{path}}`: The path to the directory containing matching configuration file within the Git repository. Example: `/clusters/clusterA`, if the config file was `/clusters/clusterA/config.json`
|
||||
- `{{path[n]}}`: The path to the matching configuration file within the Git repository, split into array elements (`n` - array index). Example: `path[0]: clusters`, `path[1]: clusterA`
|
||||
- `{{path.basename}}`: Basename of the path to the directory containing the configuration file (e.g. `clusterA`, with the above example.)
|
||||
- `{{path.basenameNormalized}}`: This field is the same as `path.basename` with unsupported characters replaced with `-` (e.g. a `path` of `/directory/directory_2`, and `path.basename` of `directory_2` would produce `directory-2` here).
|
||||
- `{{path.filename}}`: The matched filename. e.g., `config.json` in the above example.
|
||||
- `{{path.filenameNormalized}}`: The matched filename with unsupported characters replaced with `-`.
|
||||
|
||||
**Note**: The right-most *directory* name always becomes `{{.path.basename}}`. For example, from `- path: /one/two/three/four/config.json`, `{{.path.basename}}` will be `four`.
|
||||
The filename can always be accessed using `{{.path.filename}}`.
|
||||
**Note**: The right-most *directory* name always becomes `{{path.basename}}`. For example, from `- path: /one/two/three/four/config.json`, `{{path.basename}}` will be `four`.
|
||||
The filename can always be accessed using `{{path.filename}}`.
|
||||
|
||||
**Note**: If the `pathParamPrefix` option is specified, all `path`-related parameter names above will be prefixed with the specified value and a dot separator. E.g., if `pathParamPrefix` is `myRepo`, then the generated parameter name would be `myRepo.path` instead of `path`. Using this option is necessary in a Matrix generator where both child generators are Git generators (to avoid conflicts when merging the child generators’ items).
|
||||
|
||||
@@ -351,8 +349,6 @@ metadata:
|
||||
name: guestbook
|
||||
namespace: argocd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
@@ -360,18 +356,18 @@ spec:
|
||||
files:
|
||||
- path: "applicationset/examples/git-generator-files-discovery/cluster-config/**/config.json"
|
||||
values:
|
||||
base_dir: "{{index .path 0}}/{{index .path 1}}/{{index .path 2}}"
|
||||
base_dir: "{{path[0]}}/{{path[1]}}/{{path[2]}}"
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.cluster.name}}-guestbook'
|
||||
name: '{{cluster.name}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: "{{.values.base_dir}}/apps/guestbook"
|
||||
path: "{{values.base_dir}}/apps/guestbook"
|
||||
destination:
|
||||
server: '{{.cluster.address}}'
|
||||
server: '{{cluster.address}}'
|
||||
namespace: guestbook
|
||||
```
|
||||
|
||||
@@ -409,15 +405,15 @@ the contents of webhook payloads are considered untrusted, and will only result
|
||||
application (a process which already occurs at three-minute intervals). If ApplicationSet is publicly
|
||||
accessible, then configuring a webhook secret is recommended to prevent a DDoS attack.
|
||||
|
||||
In the `argocd-secret` Kubernetes secret, include the Git provider's webhook secret configured in step 1.
|
||||
In the `argocd-secret` kubernetes secret, include the Git provider's webhook secret configured in step 1.
|
||||
|
||||
Edit the Argo CD Kubernetes secret:
|
||||
Edit the Argo CD kubernetes secret:
|
||||
|
||||
```bash
|
||||
kubectl edit secret argocd-secret -n argocd
|
||||
```
|
||||
|
||||
TIP: for ease of entering secrets, Kubernetes supports inputting secrets in the `stringData` field,
|
||||
TIP: for ease of entering secrets, kubernetes supports inputting secrets in the `stringData` field,
|
||||
which saves you the trouble of base64 encoding the values and copying it to the `data` field.
|
||||
Simply copy the shared webhook secret created in step 1, to the corresponding
|
||||
GitHub/GitLab/BitBucket key under the `stringData` field:
|
||||
|
||||
@@ -8,26 +8,25 @@ metadata:
|
||||
name: guestbook
|
||||
namespace: argocd
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://kubernetes.default.svc
|
||||
- cluster: engineering-prod
|
||||
url: https://kubernetes.default.svc
|
||||
# - cluster: engineering-prod
|
||||
# url: https://kubernetes.default.svc
|
||||
# foo: bar
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.cluster}}-guestbook'
|
||||
name: '{{cluster}}-guestbook'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: applicationset/examples/list-generator/guestbook/{{.cluster}}
|
||||
path: applicationset/examples/list-generator/guestbook/{{cluster}}
|
||||
destination:
|
||||
server: '{{.url}}'
|
||||
server: '{{url}}'
|
||||
namespace: guestbook
|
||||
```
|
||||
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/list-generator).*)
|
||||
|
||||
@@ -35,8 +35,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-git
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
# matrix 'parent' generator
|
||||
- matrix:
|
||||
@@ -54,16 +52,16 @@ spec:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.path.basename}}-{{.name}}'
|
||||
name: '{{path.basename}}-{{name}}'
|
||||
spec:
|
||||
project: '{{index .metadata.labels "environment"}}'
|
||||
project: '{{metadata.labels.environment}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
path: '{{.path.path}}'
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{.server}}'
|
||||
namespace: '{{.path.basename}}'
|
||||
server: '{{server}}'
|
||||
namespace: '{{path.basename}}'
|
||||
```
|
||||
|
||||
First, the Git directory generator will scan the Git repository, discovering directories under the specified path. It discovers the argo-workflows and prometheus-operator applications, and produces two corresponding sets of parameters:
|
||||
@@ -119,8 +117,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-git
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
# matrix 'parent' generator
|
||||
- matrix:
|
||||
@@ -136,10 +132,10 @@ spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
kubernetes.io/environment: '{{.path.basename}}'
|
||||
kubernetes.io/environment: '{{path.basename}}'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}-guestbook'
|
||||
name: '{{name}}-guestbook'
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
@@ -147,7 +143,7 @@ spec:
|
||||
targetRevision: HEAD
|
||||
path: "examples/git-generator-files-discovery/apps/guestbook"
|
||||
destination:
|
||||
server: '{{.server}}'
|
||||
server: '{{server}}'
|
||||
namespace: guestbook
|
||||
```
|
||||
Here is the corresponding folder structure for the git repository used by the git-files generator:
|
||||
@@ -166,8 +162,8 @@ Here is the corresponding folder structure for the git repository used by the gi
|
||||
│ └── config.json
|
||||
└── git-generator-files.yaml
|
||||
```
|
||||
In the above example, the `{{.path.basename}}` parameters produced by the git-files generator will resolve to `dev` and `prod`.
|
||||
In the 2nd child generator, the label selector with label `kubernetes.io/environment: {{.path.basename}}` will resolve with the values produced by the first child generator's parameters (`kubernetes.io/environment: prod` and `kubernetes.io/environment: dev`).
|
||||
In the above example, the `{{path.basename}}` parameters produced by the git-files generator will resolve to `dev` and `prod`.
|
||||
In the 2nd child generator, the label selector with label `kubernetes.io/environment: {{path.basename}}` will resolve with the values produced by the first child generator's parameters (`kubernetes.io/environment: prod` and `kubernetes.io/environment: dev`).
|
||||
|
||||
So in the above example, clusters with the label `kubernetes.io/environment: prod` will have only prod-specific configuration (ie. `prod/config.json`) applied to it, wheres clusters
|
||||
with the label `kubernetes.io/environment: dev` will have only dev-specific configuration (ie. `dev/config.json`)
|
||||
@@ -266,8 +262,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: two-gits-with-path-param-prefix
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
@@ -286,7 +280,7 @@ spec:
|
||||
repoURL: https://github.com/some-org/some-repo.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "targets/{{.appName}}/*.json"
|
||||
- path: "targets/{{appName}}/*.json"
|
||||
pathParamPrefix: target
|
||||
template: {} # ...
|
||||
```
|
||||
@@ -396,7 +390,7 @@ For example, the below example would be invalid (cluster-generator must come aft
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
kubernetes.io/environment: '{{.path.basename}}' # {{.path.basename}} is produced by git-files generator
|
||||
kubernetes.io/environment: '{{path.basename}}' # {{path.basename}} is produced by git-files generator
|
||||
# git generator, 'child' #2
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/applicationset.git
|
||||
@@ -404,7 +398,7 @@ For example, the below example would be invalid (cluster-generator must come aft
|
||||
files:
|
||||
- path: "examples/git-generator-files-discovery/cluster-config/**/config.json"
|
||||
|
||||
1. You cannot have both child generators consuming parameters from each another. In the example below, the cluster generator is consuming the `{{.path.basename}}` parameter produced by the git-files generator, whereas the git-files generator is consuming the `{{.name}}` parameter produced by the cluster generator. This will result in a circular dependency, which is invalid.
|
||||
1. You cannot have both child generators consuming parameters from each another. In the example below, the cluster generator is consuming the `{{path.basename}}` parameter produced by the git-files generator, whereas the git-files generator is consuming the `{{name}}` parameter produced by the cluster generator. This will result in a circular dependency, which is invalid.
|
||||
|
||||
- matrix:
|
||||
generators:
|
||||
@@ -413,13 +407,13 @@ For example, the below example would be invalid (cluster-generator must come aft
|
||||
selector:
|
||||
matchLabels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
kubernetes.io/environment: '{{.path.basename}}' # {{.path.basename}} is produced by git-files generator
|
||||
kubernetes.io/environment: '{{path.basename}}' # {{path.basename}} is produced by git-files generator
|
||||
# git generator, 'child' #2
|
||||
- git:
|
||||
repoURL: https://github.com/argoproj/applicationset.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "examples/git-generator-files-discovery/cluster-config/engineering/{{.name}}**/config.json" # {{.name}} is produced by cluster generator
|
||||
- path: "examples/git-generator-files-discovery/cluster-config/engineering/{{name}}**/config.json" # {{name}} is produced by cluster generator
|
||||
|
||||
1. When using a Matrix generator nested inside another Matrix or Merge generator, [Post Selectors](Generators-Post-Selector.md) for this nested generator's generators will only be applied when enabled via `spec.applyNestedSelectors`. You may also need to enable this even if your Post Selectors are not within the nested matrix or Merge generator, but are instead a sibling of a nested Matrix or Merge generator.
|
||||
|
||||
|
||||
@@ -17,8 +17,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-git
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
# merge 'parent' generator
|
||||
- merge:
|
||||
@@ -43,9 +41,9 @@ spec:
|
||||
values.redis: 'true'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}'
|
||||
name: '{{name}}'
|
||||
spec:
|
||||
project: '{{index .metadata.labels "environment"}}'
|
||||
project: '{{metadata.labels.environment}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argo-cd.git
|
||||
targetRevision: HEAD
|
||||
@@ -53,11 +51,11 @@ spec:
|
||||
helm:
|
||||
parameters:
|
||||
- name: kafka
|
||||
value: '{{.values.kafka}}'
|
||||
value: '{{values.kafka}}'
|
||||
- name: redis
|
||||
value: '{{.values.redis}}'
|
||||
value: '{{values.redis}}'
|
||||
destination:
|
||||
server: '{{.server}}'
|
||||
server: '{{server}}'
|
||||
namespace: default
|
||||
```
|
||||
|
||||
@@ -124,8 +122,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: cluster-git
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
# merge 'parent' generator:
|
||||
# Use the selector set by both child generators to combine them.
|
||||
@@ -139,7 +135,7 @@ spec:
|
||||
# Set the selector to this location.
|
||||
- clusters:
|
||||
values:
|
||||
selector: '{{index .metadata.labels "location"}}'
|
||||
selector: '{{ metadata.labels.location }}'
|
||||
# The git repo may have different directories which correspond to the
|
||||
# cluster locations, using these as a selector.
|
||||
- git:
|
||||
@@ -148,19 +144,19 @@ spec:
|
||||
directories:
|
||||
- path: '*'
|
||||
values:
|
||||
selector: '{{.path.path}}'
|
||||
selector: '{{ path }}'
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.name}}'
|
||||
name: '{{name}}'
|
||||
spec:
|
||||
project: '{{index .metadata.labels "environment"}}'
|
||||
project: '{{metadata.labels.environment}}'
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
# The cluster values field for each generator will be substituted here:
|
||||
targetRevision: HEAD
|
||||
path: '{{.path.path}}'
|
||||
path: '{{path}}'
|
||||
destination:
|
||||
server: '{{.server}}'
|
||||
server: '{{server}}'
|
||||
namespace: default
|
||||
```
|
||||
|
||||
|
||||
@@ -22,8 +22,6 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: myplugin
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- plugin:
|
||||
# Specify the configMap where the plugin configuration is located.
|
||||
@@ -53,10 +51,10 @@ spec:
|
||||
metadata:
|
||||
name: myplugin
|
||||
annotations:
|
||||
example.from.input.parameters: "{{ index .generator.input.parameters.map "key1" }}"
|
||||
example.from.values: "{{ .values.value1 }}"
|
||||
example.from.input.parameters: "{{ generator.input.parameters.map.key1 }}"
|
||||
example.from.values: "{{ values.value1 }}"
|
||||
# The plugin determines what else it produces.
|
||||
example.from.plugin.output: "{{ .something.from.the.plugin }}"
|
||||
example.from.plugin.output: "{{ something.from.the.plugin }}"
|
||||
```
|
||||
|
||||
- `configMapRef.name`: A `ConfigMap` name containing the plugin configuration to use for RPC call.
|
||||
@@ -232,7 +230,6 @@ metadata:
|
||||
name: fb-matrix
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||