Compare commits

...

1 Commit

Author SHA1 Message Date
Joseph Lombrozo  e0131d3e65  sibling of b1b044e74d9f4230d1ca86384c94c07af1936fd4  2024-02-11 04:46:33 +00:00
433 changed files with 5128 additions and 38263 deletions

View File

@@ -18,10 +18,8 @@ hack/
docs/
examples/
.github/
!test/container
!test/e2e/testdata
!test/fixture
!test/remote
!test/container
!hack/installers
!hack/gpg-wrapper.sh
!hack/git-verify-wrapper.sh

View File

@@ -360,7 +360,6 @@ jobs:
name: Run end-to-end tests
runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
needs:
@@ -374,7 +373,7 @@ jobs:
ARGOCD_E2E_K3S: "true"
ARGOCD_IN_CI: "true"
ARGOCD_E2E_APISERVER_PORT: "8088"
ARGOCD_APPLICATION_NAMESPACES: "argocd-e2e-external,argocd-e2e-external-2"
ARGOCD_APPLICATION_NAMESPACES: "argocd-e2e-external"
ARGOCD_SERVER: "127.0.0.1:8088"
GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
@@ -429,7 +428,7 @@ jobs:
run: |
docker pull ghcr.io/dexidp/dex:v2.37.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.14-alpine
docker pull redis:7.0.11-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist

View File

@@ -265,13 +265,11 @@ jobs:
set -xue
SOURCE_TAG=${{ github.ref_name }}
VERSION_REF="${SOURCE_TAG#*v}"
COMMIT_HASH=$(git rev-parse HEAD)
if echo "$VERSION_REF" | grep -E -- '^[0-9]+\.[0-9]+\.0-rc1';then
VERSION=$(awk 'BEGIN {FS=OFS="."} {$2++; print}' <<< "${VERSION_REF%-rc1}")
echo "Updating VERSION to: $VERSION"
echo "UPDATE_VERSION=true" >> $GITHUB_ENV
echo "NEW_VERSION=$VERSION" >> $GITHUB_ENV
echo "COMMIT_HASH=$COMMIT_HASH" >> $GITHUB_ENV
else
echo "Not updating VERSION"
echo "UPDATE_VERSION=false" >> $GITHUB_ENV
@@ -280,10 +278,6 @@ jobs:
- name: Update VERSION on master branch
run: |
echo ${{ env.NEW_VERSION }} > VERSION
# Replace the 'project-release: vX.X.X-rcX' line in SECURITY-INSIGHTS.yml
sed -i "s/project-release: v.*$/project-release: v${{ env.NEW_VERSION }}/" SECURITY-INSIGHTS.yml
# Update the 'commit-hash: XXXXXXX' line in SECURITY-INSIGHTS.yml
sed -i "s/commit-hash: .*/commit-hash: ${{ env.NEW_VERSION }}/" SECURITY-INSIGHTS.yml
if: ${{ env.UPDATE_VERSION == 'true' }}
- name: Create PR to update VERSION on master branch

View File

@@ -2,9 +2,7 @@
** @argoproj/argocd-approvers
# Docs
/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/USERS.md @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/mkdocs.yml @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
# CI
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci

View File

@@ -1 +0,0 @@
Please refer to [the Contribution Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/code-contributions/)

View File

@@ -49,7 +49,7 @@ ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=
ARGOCD_E2E_TEST_TIMEOUT?=60m
ARGOCD_E2E_TEST_TIMEOUT?=45m
ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
@@ -386,9 +386,9 @@ test: test-tools-image
.PHONY: test-local
test-local:
if test "$(TEST_MODULE)" = ""; then \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -coverprofile=coverage.out; \
./hack/test.sh -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
./hack/test.sh -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
.PHONY: test-race
@@ -400,9 +400,9 @@ test-race: test-tools-image
.PHONY: test-race-local
test-race-local:
if test "$(TEST_MODULE)" = ""; then \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -race -coverprofile=coverage.out; \
./hack/test.sh -race -coverprofile=coverage.out `go list ./... | grep -v 'test/e2e'`; \
else \
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -race -coverprofile=coverage.out; \
./hack/test.sh -race -coverprofile=coverage.out "$(TEST_MODULE)"; \
fi
# Run the E2E test suite. E2E test servers (see start-e2e target) must be
@@ -416,7 +416,7 @@ test-e2e:
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v
ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v ./test/e2e
# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image
@@ -438,7 +438,6 @@ start-e2e: test-tools-image
start-e2e-local: mod-vendor-local dep-ui-local cli-local
kubectl create ns argocd-e2e || true
kubectl create ns argocd-e2e-external || true
kubectl create ns argocd-e2e-external-2 || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply -f -
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
@@ -459,8 +458,8 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external \
ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external \
ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
@@ -558,7 +557,6 @@ install-tools-local: install-test-tools-local install-codegen-tools-local instal
install-test-tools-local:
./hack/install.sh kustomize
./hack/install.sh helm-linux
./hack/install.sh gotestsum
# Installs all tools required for running codegen (Linux packages)
.PHONY: install-codegen-tools-local

View File

@@ -1,128 +0,0 @@
header:
schema-version: 1.0.0
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: b71277c6beb949d0199d647a582bc25822b88838
project-url: https://github.com/argoproj/argo-cd
project-release: v2.9.0-rc3
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:
status: active
roadmap: https://github.com/orgs/argoproj/projects/25
bug-fixes-only: false
core-maintainers:
- https://github.com/argoproj/argoproj/blob/master/MAINTAINERS.md
release-cycle: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/
release-process: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#release-process
contribution-policy:
accepts-pull-requests: true
accepts-automated-pull-requests: true
automated-tools-list:
- automated-tool: dependabot
action: allowed
path:
- /
- automated-tool: snyk-report
action: allowed
path:
- docs/snyk
comment: |
This tool runs Snyk and generates a report of vulnerabilities in the project's dependencies. The report is
placed in the project's documentation. The workflow is defined here:
https://github.com/argoproj/argo-cd/blob/master/.github/workflows/update-snyk.yaml
contributing-policy: https://argo-cd.readthedocs.io/en/stable/developer-guide/code-contributions/
code-of-conduct: https://github.com/cncf/foundation/blob/master/code-of-conduct.md
documentation:
- https://argo-cd.readthedocs.io/
distribution-points:
- https://github.com/argoproj/argo-cd/releases
- https://quay.io/repository/argoproj/argocd
security-artifacts:
threat-model:
threat-model-created: true
evidence-url:
- https://github.com/argoproj/argoproj/blob/master/docs/argo_threat_model.pdf
- https://github.com/argoproj/argoproj/blob/master/docs/end_user_threat_model.pdf
self-assessment:
self-assessment-created: false
comment: |
An extensive self-assessment was performed for CNCF graduation. Because the self-assessment process was evolving
at the time, no standardized document has been published.
security-testing:
- tool-type: sca
tool-name: Dependabot
tool-version: "2"
tool-url: https://github.com/dependabot
integration:
ad-hoc: false
ci: false
before-release: false
tool-rulesets:
- https://github.com/argoproj/argo-cd/blob/master/.github/dependabot.yml
- tool-type: sca
tool-name: Snyk
tool-version: latest
tool-url: https://snyk.io/
integration:
ad-hoc: true
ci: true
before-release: false
- tool-type: sast
tool-name: CodeQL
tool-version: latest
tool-url: https://codeql.github.com/
integration:
ad-hoc: false
ci: true
before-release: false
comment: |
We use the default configuration with the latest version.
security-assessments:
- auditor-name: Trail of Bits
auditor-url: https://trailofbits.com
auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/argo_security_final_report.pdf
report-year: 2021
- auditor-name: Ada Logics
auditor-url: https://adalogics.com
auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/argo_security_audit_2022.pdf
report-year: 2022
- auditor-name: Ada Logics
auditor-url: https://adalogics.com
auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/audit_fuzzer_adalogics_2022.pdf
report-year: 2022
comment: |
Part of the audit was performed by Ada Logics, focussed on fuzzing.
- auditor-name: Chainguard
auditor-url: https://chainguard.dev
auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/software_supply_chain_slsa_assessment_chainguard_2023.pdf
report-year: 2023
comment: |
Confirmed the project's release process as achieving SLSA (v0.1) level 3.
security-contacts:
- type: email
value: cncf-argo-security@lists.cncf.io
primary: true
vulnerability-reporting:
accepts-vulnerability-reports: true
email-contact: cncf-argo-security@lists.cncf.io
security-policy: https://github.com/argoproj/argo-cd/security/policy
bug-bounty-available: true
bug-bounty-url: https://hackerone.com/ibb/policy_scopes
out-scope:
- vulnerable and outdated components # See https://github.com/argoproj/argo-cd/blob/master/SECURITY.md#a-word-about-security-scanners
- security logging and monitoring failures
dependencies:
third-party-packages: true
dependencies-lists:
- https://github.com/argoproj/argo-cd/blob/master/go.mod
- https://github.com/argoproj/argo-cd/blob/master/Dockerfile
- https://github.com/argoproj/argo-cd/blob/master/ui/package.json
sbom:
- sbom-file: https://github.com/argoproj/argo-cd/releases # Every release's assets include SBOMs.
sbom-format: SPDX
dependencies-lifecycle:
policy-url: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#dependencies-lifecycle-policy
env-dependencies-policy:
policy-url: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#dependencies-lifecycle-policy

View File

@@ -25,8 +25,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
1. [Autodesk](https://www.autodesk.com)
1. [Axians ACSP](https://www.axians.fr)
2. [Autodesk](https://www.autodesk.com)
1. [Axual B.V.](https://axual.com)
1. [Back Market](https://www.backmarket.com)
1. [Baloise](https://www.baloise.com)
@@ -94,7 +93,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Flexport](https://www.flexport.com/)
1. [Flip](https://flip.id)
1. [Fonoa](https://www.fonoa.com/)
1. [Fortra](https://www.fortra.com)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [Freshop, Inc](https://www.freshop.com/)
1. [Future PLC](https://www.futureplc.com/)
@@ -171,7 +169,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Meican](https://meican.com/)
1. [Meilleurs Agents](https://www.meilleursagents.com/)
1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/)
1. [Mercedes-Benz.io](https://www.mercedes-benz.io/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
@@ -212,7 +209,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Patreon](https://www.patreon.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Percona](https://percona.com/)
1. [PGS](https://www.pgs.com)
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)
@@ -247,9 +243,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Sap Labs](http://sap.com)
1. [Sauce Labs](https://saucelabs.com/)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [SCRM Lidl International Hub](https://scrm.lidl)
1. [SEEK](https://seek.com.au)
1. [Semgrep](https://semgrep.com)
1. [SI Analytics](https://si-analytics.ai)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)

View File

@@ -1 +1 @@
2.9.0
2.9.6

View File

@@ -114,7 +114,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Log a warning if there are unrecognized generators
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
desiredApplications, applicationSetReason, err := r.generateApplications(logCtx, applicationSetInfo)
desiredApplications, applicationSetReason, err := r.generateApplications(applicationSetInfo)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -161,9 +161,9 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if r.EnableProgressiveSyncs {
if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
// If appset used progressive sync but stopped, clean up the progressive sync application statuses
logCtx.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
err := r.setAppSetApplicationStatus(ctx, logCtx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
}
@@ -178,7 +178,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
appMap[app.Name] = app
}
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, applications, desiredApplications, appMap)
appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
}
@@ -216,7 +216,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
if r.EnableProgressiveSyncs {
// trigger appropriate application syncs if RollingSync strategy is enabled
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
validApps, err = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
@@ -234,7 +234,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
err = r.createOrUpdateInCluster(ctx, logCtx, applicationSetInfo, validApps)
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -248,7 +248,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}
} else {
err = r.createInCluster(ctx, logCtx, applicationSetInfo, validApps)
err = r.createInCluster(ctx, applicationSetInfo, validApps)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -264,7 +264,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, desiredApplications)
err = r.deleteInCluster(ctx, applicationSetInfo, desiredApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
@@ -489,7 +489,7 @@ func getTempApplication(applicationSetTemplate argov1alpha1.ApplicationSetTempla
return &tmplApplication
}
func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) {
func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, argov1alpha1.ApplicationSetReasonType, error) {
var res []argov1alpha1.Application
var firstError error
@@ -498,7 +498,7 @@ func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, appli
for _, requestedGenerator := range applicationSetInfo.Spec.Generators {
t, err := generators.Transform(requestedGenerator, r.Generators, applicationSetInfo.Spec.Template, &applicationSetInfo, map[string]interface{}{})
if err != nil {
logCtx.WithError(err).WithField("generator", requestedGenerator).
log.WithError(err).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
firstError = err
@@ -513,7 +513,7 @@ func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, appli
for _, p := range a.Params {
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
if err != nil {
logCtx.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
log.WithError(err).WithField("params", a.Params).WithField("generator", requestedGenerator).
Error("error generating application from params")
if firstError == nil {
@@ -526,8 +526,8 @@ func (r *ApplicationSetReconciler) generateApplications(logCtx *log.Entry, appli
}
}
logCtx.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
logCtx.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
log.WithField("generator", requestedGenerator).Infof("generated %d applications", len(res))
log.WithField("generator", requestedGenerator).Debugf("apps from generator: %+v", res)
}
return res, applicationSetReason, firstError
@@ -541,24 +541,22 @@ func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
}
}
func appControllerIndexer(rawObj client.Object) []string {
// grab the Application object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's an application set...
if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", appControllerIndexer); err != nil {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
// grab the Application object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's an application set...
if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}); err != nil {
return fmt.Errorf("error setting up with manager: %w", err)
}
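Both sides of this hunk register the same ".metadata.controller" field index; the change only inlines the extractor that was previously the named appControllerIndexer helper. For context, a hedged sketch of the test-side counterpart (mirroring the WithIndex calls removed from the tests further below; controller-runtime's fake client builder, v0.15+ assumed):

import (
	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeClientWithIndex registers the same index on the fake client so that
// client.MatchingFields{".metadata.controller": name} queries, as used by
// getCurrentApplications, keep working in tests. appControllerIndexer is the
// extractor function shown above.
func newFakeClientWithIndex(scheme *runtime.Scheme, objs ...client.Object) client.Client {
	return fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(objs...).
		WithIndex(&argov1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
		Build()
}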
@@ -602,14 +600,14 @@ func (r *ApplicationSetReconciler) updateCache(ctx context.Context, obj client.O
// - For new applications, it will call create
// - For existing application, it will call update
// The function also adds owner reference to all applications, and uses it to delete them.
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var firstError error
// Creates or updates the application in appList
for _, generatedApp := range desiredApplications {
generatedApp.Namespace = applicationSet.Namespace
appLog := logCtx.WithFields(log.Fields{"app": generatedApp.QualifiedName()})
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
generatedApp.Namespace = applicationSet.Namespace
// Normalize to avoid fighting with the application controller.
generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec)
@@ -705,7 +703,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
// createInCluster will filter from the desiredApplications only the application that needs to be created
// Then it will call createOrUpdateInCluster to do the actual create
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
var createApps []argov1alpha1.Application
current, err := r.getCurrentApplications(ctx, applicationSet)
@@ -728,13 +726,13 @@ func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, logCtx *
}
}
return r.createOrUpdateInCluster(ctx, logCtx, applicationSet, createApps)
return r.createOrUpdateInCluster(ctx, applicationSet, createApps)
}
func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, applicationSet argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
// TODO: Should this use the context param?
var current argov1alpha1.ApplicationList
err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name}, client.InNamespace(applicationSet.Namespace))
err := r.Client.List(context.Background(), &current, client.MatchingFields{".metadata.controller": applicationSet.Name})
if err != nil {
return nil, fmt.Errorf("error retrieving applications: %w", err)
@@ -745,7 +743,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, app
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators had been called and generated applications
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
// clusterList, err := argoDB.ListClusters(ctx)
@@ -769,15 +767,15 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
// Delete apps that are not in m[string]bool
var firstError error
for _, app := range current {
logCtx = logCtx.WithField("app", app.QualifiedName())
appLog := log.WithFields(log.Fields{"app": app.Name, "appSet": applicationSet.Name})
_, exists := m[app.Name]
if !exists {
// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, logCtx)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLog)
if err != nil {
logCtx.WithError(err).Error("failed to update Application")
appLog.WithError(err).Error("failed to update Application")
if firstError == nil {
firstError = err
}
@@ -786,14 +784,14 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
err = r.Client.Delete(ctx, &app)
if err != nil {
logCtx.WithError(err).Error("failed to delete Application")
appLog.WithError(err).Error("failed to delete Application")
if firstError == nil {
firstError = err
}
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
logCtx.Log(log.InfoLevel, "Deleted application")
appLog.Log(log.InfoLevel, "Deleted application")
}
}
return firstError
@@ -875,21 +873,21 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
return nil
}
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
appDependencyList, appStepMap, err := r.buildAppDependencyList(logCtx, appset, desiredApplications)
appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
if err != nil {
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
}
_, err = r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
_, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
}
logCtx.Infof("ApplicationSet %v step list:", appset.Name)
log.Infof("ApplicationSet %v step list:", appset.Name)
for i, step := range appDependencyList {
logCtx.Infof("step %v: %+v", i+1, step)
log.Infof("step %v: %+v", i+1, step)
}
appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
@@ -897,9 +895,9 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
return nil, fmt.Errorf("failed to build app sync map: %w", err)
}
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap, appMap)
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
}
@@ -913,7 +911,7 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
}
// this list tracks which Applications belong to each RollingUpdate step
func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
return [][]string{}, map[string]int{}, nil
@@ -940,9 +938,9 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
for _, matchExpression := range step.MatchExpressions {
if val, ok := app.Labels[matchExpression.Key]; ok {
valueMatched := labelMatchedExpression(logCtx, val, matchExpression)
valueMatched := labelMatchedExpression(val, matchExpression)
if !valueMatched { // none of the matchExpression values was a match with the Application's labels
selected = false
break
}
@@ -955,7 +953,7 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
if selected {
appDependencyList[i] = append(appDependencyList[i], app.Name)
if val, ok := appStepMap[app.Name]; ok {
logCtx.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
log.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
} else {
appStepMap[app.Name] = i
}
@@ -966,9 +964,9 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
return appDependencyList, appStepMap, nil
}
func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
if matchExpression.Operator != "In" && matchExpression.Operator != "NotIn" {
logCtx.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
log.Errorf("skipping AppSet rollingUpdate step Application selection, invalid matchExpression operator provided: %q ", matchExpression.Operator)
return false
}
@@ -1072,7 +1070,7 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
}
// check the status of each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
@@ -1105,7 +1103,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
logCtx.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
@@ -1117,15 +1115,15 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
// this covers race conditions where syncs initiated by RollingSync miraculously have a sync time before the transition to Pending state occurred (could be a few seconds)
if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.Add(time.Duration(10)*time.Second).After(currentAppStatus.LastTransitionTime.Time) {
if !app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
logCtx.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
log.Warnf("Application %v was synced less than 10s prior to entering Pending status, we'll assume the AppSet controller triggered this sync and update its status to Progressing", app.Name)
}
logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
@@ -1134,7 +1132,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
logCtx.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
@@ -1142,7 +1140,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
logCtx.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
@@ -1152,7 +1150,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
appStatuses = append(appStatuses, currentAppStatus)
}
err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
if err != nil {
return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err)
}
@@ -1161,7 +1159,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
// check Applications that are in Waiting status and promote them to Pending if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
@@ -1203,7 +1201,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if maxUpdate != nil {
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
if err != nil {
logCtx.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
log.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
}
// ensure that percentage values greater than 0% always result in at least 1 Application being selected
@@ -1213,13 +1211,13 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
}
}
if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
logCtx.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
appStatus.LastTransitionTime = &now
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
@@ -1232,7 +1230,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
}
}
err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
if err != nil {
return nil, fmt.Errorf("failed to set AppSet app status: %w", err)
}
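The maxUpdate handling above scales an int-or-percent value against the number of Applications in the current step, then promotes Waiting Applications to Pending only while the per-step update count stays under that limit. A small worked example of the scaling call (k8s.io/apimachinery; values illustrative):

import "k8s.io/apimachinery/pkg/util/intstr"

// With maxUpdate "50%" and 4 Applications in the step, roundUp=false yields 2,
// so at most two Applications may be updating at once; the surrounding code
// additionally bumps any non-zero percentage up to at least 1.
func exampleMaxUpdate() (int, error) {
	maxUpdate := intstr.FromString("50%")
	return intstr.GetScaledValueFromIntOrPercent(&maxUpdate, 4, false)
}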
@@ -1294,7 +1292,7 @@ func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplica
// setAppSetApplicationStatus updates the ApplicationSet's status field
// with any new/changed Application statuses.
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
needToUpdateStatus := false
if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) {
@@ -1328,7 +1326,7 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil {
logCtx.Errorf("unable to set application set status: %v", err)
log.Errorf("unable to set application set status: %v", err)
return fmt.Errorf("unable to set application set status: %v", err)
}
@@ -1343,7 +1341,7 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
return nil
}
func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
rolloutApps := []argov1alpha1.Application{}
for i := range validApps {
pruneEnabled := false
@@ -1363,7 +1361,7 @@ func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, appl
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
}
rolloutApps = append(rolloutApps, validApps[i])
@@ -1407,51 +1405,29 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
CreateFunc: func(e event.CreateEvent) bool {
// if we are the owner and there is a create event, we most likely created it and do not need to
// re-reconcile
if log.IsLevelEnabled(log.DebugLevel) {
var appName string
app, isApp := e.Object.(*argov1alpha1.Application)
if isApp {
appName = app.QualifiedName()
}
log.WithField("app", appName).Debugln("received create event from owning an application")
}
log.Debugln("received create event from owning an application")
return false
},
DeleteFunc: func(e event.DeleteEvent) bool {
if log.IsLevelEnabled(log.DebugLevel) {
var appName string
app, isApp := e.Object.(*argov1alpha1.Application)
if isApp {
appName = app.QualifiedName()
}
log.WithField("app", appName).Debugln("received delete event from owning an application")
}
log.Debugln("received delete event from owning an application")
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
log.Debugln("received update event from owning an application")
appOld, isApp := e.ObjectOld.(*argov1alpha1.Application)
if !isApp {
return false
}
logCtx := log.WithField("app", appOld.QualifiedName())
logCtx.Debugln("received update event from owning an application")
appNew, isApp := e.ObjectNew.(*argov1alpha1.Application)
if !isApp {
return false
}
requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
log.Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
return requeue
},
GenericFunc: func(e event.GenericEvent) bool {
if log.IsLevelEnabled(log.DebugLevel) {
var appName string
app, isApp := e.Object.(*argov1alpha1.Application)
if isApp {
appName = app.QualifiedName()
}
log.WithField("app", appName).Debugln("received generic event from owning an application")
}
log.Debugln("received generic event from owning an application")
return true
},
}
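These predicate.Funcs decide which owned-Application events re-trigger ApplicationSet reconciliation: creates are ignored (the controller most likely just created the object), while deletes and meaningful updates requeue. A hedged sketch of how such predicates are typically attached with the controller-runtime builder (the wiring is assumed here, not shown in this diff):

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// Illustrative wiring: reconcile ApplicationSets, watching owned Applications
// filtered through the predicates above.
func setupController(mgr ctrl.Manager, r *ApplicationSetReconciler, enableProgressiveSyncs bool) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&argov1alpha1.ApplicationSet{}).
		Owns(&argov1alpha1.Application{}, builder.WithPredicates(getOwnsHandlerPredicates(enableProgressiveSyncs))).
		Complete(r)
}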

View File

@@ -218,7 +218,7 @@ func TestExtractApplications(t *testing.T) {
Cache: &fakeCache{},
}
got, reason, err := r.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
got, reason, err := r.generateApplications(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
@@ -331,7 +331,7 @@ func TestMergeTemplateApplications(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}
got, _, _ := r.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
got, _, _ := r.generateApplications(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
@@ -1282,7 +1282,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
initObjs = append(initObjs, &a)
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -1291,8 +1291,8 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
Cache: &fakeCache{},
}
err = r.createOrUpdateInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
assert.NoError(t, err)
err = r.createOrUpdateInCluster(context.TODO(), c.appSet, c.desiredApps)
assert.Nil(t, err)
for _, obj := range c.expected {
got := &v1alpha1.Application{}
@@ -1375,7 +1375,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
initObjs := []crtclient.Object{&app, &appSet}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1537,7 +1537,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
initObjs := []crtclient.Object{&app, &appSet}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1769,7 +1769,7 @@ func TestCreateApplications(t *testing.T) {
initObjs = append(initObjs, &a)
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -1778,7 +1778,7 @@ func TestCreateApplications(t *testing.T) {
Cache: &fakeCache{},
}
err = r.createInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.apps)
err = r.createInCluster(context.TODO(), c.appSet, c.apps)
assert.Nil(t, err)
for _, obj := range c.expected {
@@ -1913,7 +1913,7 @@ func TestDeleteInCluster(t *testing.T) {
initObjs = append(initObjs, &temp)
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -1922,7 +1922,7 @@ func TestDeleteInCluster(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}
err = r.deleteInCluster(context.TODO(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
err = r.deleteInCluster(context.TODO(), c.appSet, c.desiredApps)
assert.Nil(t, err)
// For each of the expected objects, verify they exist on the cluster
@@ -2287,15 +2287,7 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&project}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
goodCluster,
}}, nil)
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
@@ -2371,7 +2363,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -2441,7 +2433,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2611,7 +2603,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2812,7 +2804,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
KubeClientset: kubefake.NewSimpleClientset(),
}
gotApp, _, _ := appSetReconciler.generateApplications(log.NewEntry(log.StandardLogger()), v1alpha1.ApplicationSet{
gotApp, _, _ := appSetReconciler.generateApplications(v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Generators: []v1alpha1.ApplicationSetGenerator{{
@@ -2922,7 +2914,7 @@ func TestPolicies(t *testing.T) {
},
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
r := ApplicationSetReconciler{
Client: client,
@@ -3101,7 +3093,7 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
KubeClientset: kubeclientset,
}
err = r.setAppSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appStatuses)
err = r.setAppSetApplicationStatus(context.TODO(), &cc.appSet, cc.appStatuses)
assert.Nil(t, err)
assert.Equal(t, cc.expectedAppStatuses, cc.appSet.Status.ApplicationStatus)
@@ -3864,7 +3856,7 @@ func TestBuildAppDependencyList(t *testing.T) {
KubeClientset: kubeclientset,
}
appDependencyList, appStepMap, err := r.buildAppDependencyList(log.NewEntry(log.StandardLogger()), cc.appSet, cc.apps)
appDependencyList, appStepMap, err := r.buildAppDependencyList(context.TODO(), cc.appSet, cc.apps)
assert.Equal(t, err, nil, "expected no errors, but errors occured")
assert.Equal(t, cc.expectedList, appDependencyList, "expected appDependencyList did not match actual")
assert.Equal(t, cc.expectedStepMap, appStepMap, "expected appStepMap did not match actual")
@@ -5118,7 +5110,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
KubeClientset: kubeclientset,
}
appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, cc.appStepMap)
appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), &cc.appSet, cc.apps, cc.appStepMap)
// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {
@@ -5872,7 +5864,7 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
KubeClientset: kubeclientset,
}
appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap)
appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), &cc.appSet, cc.appSyncMap, cc.appStepMap, cc.appMap)
// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {

View File

@@ -60,9 +60,9 @@ func TestRequeueAfter(t *testing.T) {
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
"Git": generators.NewGitGenerator(mockServer),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}, true),
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}, true),
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}),
}
nestedGenerators := map[string]generators.Generator{

View File

@@ -56,12 +56,14 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
return nil, EmptyAppSetGeneratorError
}
noRevisionCache := appSet.RefreshRequired()
var err error
var res []map[string]interface{}
if len(appSetGenerator.Git.Directories) != 0 {
res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
res, err = g.generateParamsForGitDirectories(appSetGenerator, noRevisionCache, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
} else if len(appSetGenerator.Git.Files) != 0 {
res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
res, err = g.generateParamsForGitFiles(appSetGenerator, noRevisionCache, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
} else {
return nil, EmptyAppSetGeneratorError
}
@@ -72,10 +74,10 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
return res, nil
}
func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, noRevisionCache bool, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
// Directories, not files
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, noRevisionCache)
if err != nil {
return nil, fmt.Errorf("error getting directories from repo: %w", err)
}
@@ -98,12 +100,12 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
return res, nil
}
func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, noRevisionCache bool, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
// Get all files that match the requested path string, removing duplicates
allFiles := make(map[string][]byte)
for _, requestedPath := range appSetGenerator.Git.Files {
files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path)
files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path, noRevisionCache)
if err != nil {
return nil, err
}
@@ -148,9 +150,6 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
return nil, fmt.Errorf("unable to parse file: %v", err)
}
objectsFound = append(objectsFound, singleObj)
} else if len(objectsFound) == 0 {
// If file is valid but empty, add a default empty item
objectsFound = append(objectsFound, map[string]interface{}{})
}
res := []map[string]interface{}{}
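For context: one side of the hunks above threads a noRevisionCache flag, derived from appSet.RefreshRequired(), from GenerateParams down through generateParamsForGitDirectories and generateParamsForGitFiles into the repo service, so that a user-requested refresh bypasses the cached revision lookup. A minimal sketch of that pass-through pattern, with trimmed signatures and an illustrative generateDirectoryParams name:

package generators

import (
	"context"
	"fmt"
)

// Repos is a trimmed stand-in for the applicationset repo service interface;
// the real interface also exposes GetFiles.
type Repos interface {
	GetDirectories(ctx context.Context, repoURL, revision string, noRevisionCache bool) ([]string, error)
}

type GitGenerator struct{ repos Repos }

// The flag is derived once from the ApplicationSet's refresh state and passed
// through unchanged, so a requested refresh skips the revision cache.
func (g *GitGenerator) generateDirectoryParams(ctx context.Context, repoURL, revision string, refreshRequired bool) ([]string, error) {
	noRevisionCache := refreshRequired
	dirs, err := g.repos.GetDirectories(ctx, repoURL, revision, noRevisionCache)
	if err != nil {
		return nil, fmt.Errorf("error getting directories from repo: %w", err)
	}
	return dirs, nil
}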

View File

@@ -4,173 +4,119 @@ import (
"fmt"
"testing"
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
func Test_generateParamsFromGitFile(t *testing.T) {
defaultContent := []byte(`
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`)
type args struct {
filePath string
fileContent []byte
values map[string]string
useGoTemplate bool
goTemplateOptions []string
pathParamPrefix string
`), values, false, nil, "")
if err != nil {
t.Fatal(err)
}
tests := []struct {
name string
args args
want []map[string]interface{}
wantErr bool
}{
assert.Equal(t, []map[string]interface{}{
{
name: "empty file returns path parameters",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: []byte(""),
values: map[string]string{},
useGoTemplate: false,
},
want: []map[string]interface{}{
{
"path": "path/dir",
"path.basename": "dir",
"path.filename": "file_name.yaml",
"path.basenameNormalized": "dir",
"path.filenameNormalized": "file-name.yaml",
"path[0]": "path",
"path[1]": "dir",
},
},
},
{
name: "invalid json/yaml file returns error",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: []byte("this is not json or yaml"),
values: map[string]string{},
useGoTemplate: false,
},
wantErr: true,
},
{
name: "file parameters are added to params",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: false,
},
want: []map[string]interface{}{
{
"foo.bar": "baz",
"path": "path/dir",
"path.basename": "dir",
"path.filename": "file_name.yaml",
"path.basenameNormalized": "dir",
"path.filenameNormalized": "file-name.yaml",
"path[0]": "path",
"path[1]": "dir",
},
},
},
{
name: "path parameter are prefixed",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: false,
pathParamPrefix: "myRepo",
},
want: []map[string]interface{}{
{
"foo.bar": "baz",
"myRepo.path": "path/dir",
"myRepo.path.basename": "dir",
"myRepo.path.filename": "file_name.yaml",
"myRepo.path.basenameNormalized": "dir",
"myRepo.path.filenameNormalized": "file-name.yaml",
"myRepo.path[0]": "path",
"myRepo.path[1]": "dir",
},
},
},
{
name: "file parameters are added to params with go template",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: true,
},
want: []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
},
{
name: "path parameter are prefixed with go template",
args: args{
filePath: "path/dir/file_name.yaml",
fileContent: defaultContent,
values: map[string]string{},
useGoTemplate: true,
pathParamPrefix: "myRepo",
},
want: []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
},
"foo.bar": "baz",
"path": "path/dir",
"path.basename": "dir",
"path.filename": "file_name.yaml",
"path.basenameNormalized": "dir",
"path.filenameNormalized": "file-name.yaml",
"path[0]": "path",
"path[1]": "dir",
},
}, params)
}
func Test_generatePrefixedParamsFromGitFile(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, false, nil, "myRepo")
if err != nil {
t.Fatal(err)
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
params, err := (*GitGenerator)(nil).generateParamsFromGitFile(tt.args.filePath, tt.args.fileContent, tt.args.values, tt.args.useGoTemplate, tt.args.goTemplateOptions, tt.args.pathParamPrefix)
if (err != nil) != tt.wantErr {
t.Errorf("GitGenerator.generateParamsFromGitFile() error = %v, wantErr %v", err, tt.wantErr)
return
}
assert.Equal(t, tt.want, params)
})
assert.Equal(t, []map[string]interface{}{
{
"foo.bar": "baz",
"myRepo.path": "path/dir",
"myRepo.path.basename": "dir",
"myRepo.path.filename": "file_name.yaml",
"myRepo.path.basenameNormalized": "dir",
"myRepo.path.filenameNormalized": "file-name.yaml",
"myRepo.path[0]": "path",
"myRepo.path[1]": "dir",
},
}, params)
}
func Test_generateParamsFromGitFileGoTemplate(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, true, nil, "")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
}, params)
}
func Test_generatePrefixedParamsFromGitFileGoTemplate(t *testing.T) {
values := map[string]string{}
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
foo:
bar: baz
`), values, true, nil, "myRepo")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, []map[string]interface{}{
{
"foo": map[string]interface{}{
"bar": "baz",
},
"myRepo": map[string]interface{}{
"path": map[string]interface{}{
"path": "path/dir",
"basename": "dir",
"filename": "file_name.yaml",
"basenameNormalized": "dir",
"filenameNormalized": "file-name.yaml",
"segments": []string{
"path",
"dir",
},
},
},
},
}, params)
}
func TestGitGenerateParamsFromDirectories(t *testing.T) {
@@ -317,7 +263,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
@@ -613,7 +559,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
@@ -972,7 +918,7 @@ cluster:
t.Parallel()
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
@@ -1322,7 +1268,7 @@ cluster:
t.Parallel()
argoCDServiceMock := mocks.Repos{}
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
var gitGenerator = NewGitGenerator(&argoCDServiceMock)
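The two shapes of Test_generateParamsFromGitFile above illustrate the trade-off between Go's table-driven idiom (one test function, a slice of cases, t.Run subtests) and one function per case. A self-contained sketch of the table-driven form, using a hypothetical normalize helper in place of the generator:

package generators

import (
	"fmt"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// normalize is a hypothetical helper standing in for the function under test.
func normalize(s string) (string, error) {
	if s == "" {
		return "", fmt.Errorf("empty input")
	}
	return strings.ToLower(s), nil
}

func Test_tableDrivenExample(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		want    string
		wantErr bool
	}{
		{name: "lowercases input", input: "DIR", want: "dir"},
		{name: "empty input is an error", input: "", wantErr: true},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			got, err := normalize(tt.input)
			if tt.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tt.want, got)
		})
	}
}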

View File

@@ -1108,7 +1108,7 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
}
repoServiceMock := &mocks.Repos{}
repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
"some/path.json": []byte("test: content"),
}, nil)
gitGenerator := NewGitGenerator(repoServiceMock)

View File

@@ -27,16 +27,14 @@ type PullRequestGenerator struct {
auth SCMAuthProviders
scmRootCAPath string
allowedSCMProviders []string
enableSCMProviders bool
}
func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string, enableSCMProviders bool) Generator {
func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string) Generator {
g := &PullRequestGenerator{
client: client,
auth: auth,
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedScmProviders,
enableSCMProviders: enableSCMProviders,
}
g.selectServiceProviderFunc = g.selectServiceProvider
return g
@@ -68,7 +66,7 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
ctx := context.Background()
svc, err := g.selectServiceProviderFunc(ctx, appSetGenerator.PullRequest, applicationSetInfo)
if err != nil {
return nil, fmt.Errorf("failed to select pull request service provider: %w", err)
return nil, fmt.Errorf("failed to select pull request service provider: %v", err)
}
pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
@@ -123,18 +121,17 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
// selectServiceProvider selects the provider to get pull requests from the configuration
func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, generatorConfig *argoprojiov1alpha1.PullRequestGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
if !g.enableSCMProviders {
return nil, ErrSCMProvidersDisabled
}
if err := ScmProviderAllowed(applicationSetInfo, generatorConfig, g.allowedSCMProviders); err != nil {
return nil, fmt.Errorf("scm provider not allowed: %w", err)
}
if generatorConfig.Github != nil {
if !ScmProviderAllowed(applicationSetInfo, generatorConfig.Github.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Github.API)
}
return g.github(ctx, generatorConfig.Github, applicationSetInfo)
}
if generatorConfig.GitLab != nil {
providerConfig := generatorConfig.GitLab
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %v", err)
@@ -143,6 +140,9 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.Gitea != nil {
providerConfig := generatorConfig.Gitea
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Gitea.API)
}
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %v", err)
@@ -151,6 +151,9 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.BitbucketServer != nil {
providerConfig := generatorConfig.BitbucketServer
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
if providerConfig.BasicAuth != nil {
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
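The PullRequestGenerator change above swaps between two allowlist styles: a single up-front check that returns a typed error, and a per-provider check that returns a bool and formats the message inline. A sketch of the typed-error style, which keeps the message in one place and lets callers match with errors.As even through wrapping (helper names trimmed for illustration):

package generators

import (
	"errors"
	"fmt"
	"strings"
)

// ErrDisallowedSCMProvider carries the disallowed URL and the allowlist, so
// the message is formatted once and callers can still match the error type.
type ErrDisallowedSCMProvider struct {
	Provider string
	Allowed  []string
}

func (e ErrDisallowedSCMProvider) Error() string {
	return fmt.Sprintf("scm provider %q not allowed, must use one of the following: %s",
		e.Provider, strings.Join(e.Allowed, ", "))
}

func scmProviderAllowed(url string, allowed []string) error {
	if url == "" || len(allowed) == 0 {
		return nil // no custom URL, or no restriction configured
	}
	for _, a := range allowed {
		if url == a {
			return nil
		}
	}
	return ErrDisallowedSCMProvider{Provider: url, Allowed: allowed}
}

func isDisallowed(err error) bool {
	var e ErrDisallowedSCMProvider
	return errors.As(err, &e) // still matches when wrapped with fmt.Errorf("...: %w", err)
}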

View File

@@ -278,7 +278,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
cases := []struct {
name string
providerConfig *argoprojiov1alpha1.PullRequestGenerator
expectedError error
expectedError string
}{
{
name: "Error Github",
@@ -287,7 +287,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitlab",
@@ -296,7 +296,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitea",
@@ -305,7 +305,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Bitbucket",
@@ -314,7 +314,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
}
@@ -330,7 +330,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
}, true)
})
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -346,29 +346,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
_, err := pullRequestGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
assert.Error(t, err, "Must return an error")
assert.ErrorAs(t, err, testCaseCopy.expectedError)
assert.Equal(t, testCaseCopy.expectedError, err.Error())
})
}
}
func TestSCMProviderDisabled_PRGenerator(t *testing.T) {
generator := NewPullRequestGenerator(nil, SCMAuthProviders{}, "", []string{}, false)
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{
Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
}},
},
}
_, err := generator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
assert.ErrorIs(t, err, ErrSCMProvidersDisabled)
}

View File

@@ -2,7 +2,6 @@ package generators
import (
"context"
"errors"
"fmt"
"strings"
"time"
@@ -32,26 +31,24 @@ type SCMProviderGenerator struct {
SCMAuthProviders
scmRootCAPath string
allowedSCMProviders []string
enableSCMProviders bool
}
type SCMAuthProviders struct {
GitHubApps github_app_auth.Credentials
}
func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool) Generator {
func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string) Generator {
return &SCMProviderGenerator{
client: client,
SCMAuthProviders: providers,
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedSCMProviders,
enableSCMProviders: enableSCMProviders,
}
}
// Testing generator
func NewTestSCMProviderGenerator(overrideProvider scm_provider.SCMProviderService) Generator {
return &SCMProviderGenerator{overrideProvider: overrideProvider, enableSCMProviders: true}
return &SCMProviderGenerator{overrideProvider: overrideProvider}
}
func (g *SCMProviderGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) time.Duration {
@@ -68,34 +65,14 @@ func (g *SCMProviderGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.A
return &appSetGenerator.SCMProvider.Template
}
var ErrSCMProvidersDisabled = errors.New("scm providers are disabled")
type ErrDisallowedSCMProvider struct {
Provider string
Allowed []string
}
func NewErrDisallowedSCMProvider(provider string, allowed []string) ErrDisallowedSCMProvider {
return ErrDisallowedSCMProvider{
Provider: provider,
Allowed: allowed,
}
}
func (e ErrDisallowedSCMProvider) Error() string {
return fmt.Sprintf("scm provider %q not allowed, must use one of the following: %s", e.Provider, strings.Join(e.Allowed, ", "))
}
func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, generator SCMGeneratorWithCustomApiUrl, allowedScmProviders []string) error {
url := generator.CustomApiUrl()
func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, url string, allowedScmProviders []string) bool {
if url == "" || len(allowedScmProviders) == 0 {
return nil
return true
}
for _, allowedScmProvider := range allowedScmProviders {
if url == allowedScmProvider {
return nil
return true
}
}
@@ -103,9 +80,9 @@ func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, g
common.SecurityField: common.SecurityMedium,
"applicationset": applicationSetInfo.Name,
"appSetNamespace": applicationSetInfo.Namespace,
}).Debugf("attempted to use disallowed SCM %q, must use one of the following: %s", url, strings.Join(allowedScmProviders, ", "))
}).Debugf("attempted to use disallowed SCM %q", url)
return NewErrDisallowedSCMProvider(url, allowedScmProviders)
return false
}
func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
@@ -117,28 +94,26 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, EmptyAppSetGeneratorError
}
if !g.enableSCMProviders {
return nil, ErrSCMProvidersDisabled
}
ctx := context.Background()
// Create the SCM provider helper.
providerConfig := appSetGenerator.SCMProvider
if err := ScmProviderAllowed(applicationSetInfo, providerConfig, g.allowedSCMProviders); err != nil {
return nil, fmt.Errorf("scm provider not allowed: %w", err)
}
ctx := context.Background()
var provider scm_provider.SCMProviderService
if g.overrideProvider != nil {
provider = g.overrideProvider
} else if providerConfig.Github != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Github.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Github.API)
}
var err error
provider, err = g.githubProvider(ctx, providerConfig.Github, applicationSetInfo)
if err != nil {
return nil, fmt.Errorf("scm provider: %w", err)
}
} else if providerConfig.Gitlab != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitlab.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitlab.API)
}
token, err := g.getSecretRef(ctx, providerConfig.Gitlab.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitlab token: %v", err)
@@ -148,6 +123,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Gitlab service: %v", err)
}
} else if providerConfig.Gitea != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitea.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitea.API)
}
token, err := g.getSecretRef(ctx, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitea token: %v", err)
@@ -158,6 +136,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
}
} else if providerConfig.BitbucketServer != nil {
providerConfig := providerConfig.BitbucketServer
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
var scmError error
if providerConfig.BasicAuth != nil {
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
@@ -172,6 +153,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Bitbucket Server service: %v", scmError)
}
} else if providerConfig.AzureDevOps != nil {
if !ScmProviderAllowed(applicationSetInfo, providerConfig.AzureDevOps.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.AzureDevOps.API)
}
token, err := g.getSecretRef(ctx, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Azure Devops access token: %v", err)

View File

@@ -174,7 +174,7 @@ func TestSCMProviderGenerateParams(t *testing.T) {
mockProvider := &scm_provider.MockProvider{
Repos: testCaseCopy.repos,
}
scmGenerator := &SCMProviderGenerator{overrideProvider: mockProvider, enableSCMProviders: true}
scmGenerator := &SCMProviderGenerator{overrideProvider: mockProvider}
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
@@ -205,7 +205,7 @@ func TestAllowedSCMProvider(t *testing.T) {
cases := []struct {
name string
providerConfig *argoprojiov1alpha1.SCMProviderGenerator
expectedError error
expectedError string
}{
{
name: "Error Github",
@@ -214,7 +214,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitlab",
@@ -223,7 +223,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Gitea",
@@ -232,7 +232,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error Bitbucket",
@@ -241,7 +241,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
{
name: "Error AzureDevops",
@@ -250,7 +250,7 @@ func TestAllowedSCMProvider(t *testing.T) {
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
expectedError: &ErrDisallowedSCMProvider{},
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
},
}
@@ -260,16 +260,13 @@ func TestAllowedSCMProvider(t *testing.T) {
t.Run(testCaseCopy.name, func(t *testing.T) {
t.Parallel()
scmGenerator := &SCMProviderGenerator{
allowedSCMProviders: []string{
"github.myorg.com",
"gitlab.myorg.com",
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
},
enableSCMProviders: true,
}
scmGenerator := &SCMProviderGenerator{allowedSCMProviders: []string{
"github.myorg.com",
"gitlab.myorg.com",
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
}}
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -285,29 +282,7 @@ func TestAllowedSCMProvider(t *testing.T) {
_, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
assert.Error(t, err, "Must return an error")
assert.ErrorAs(t, err, testCaseCopy.expectedError)
assert.Equal(t, testCaseCopy.expectedError, err.Error())
})
}
}
func TestSCMProviderDisabled_SCMGenerator(t *testing.T) {
generator := &SCMProviderGenerator{enableSCMProviders: false}
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "set",
},
Spec: argoprojiov1alpha1.ApplicationSetSpec{
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
SCMProvider: &argoprojiov1alpha1.SCMProviderGenerator{
Github: &argoprojiov1alpha1.SCMProviderGeneratorGithub{
API: "https://myservice.mynamespace.svc.cluster.local",
},
},
}},
},
}
_, err := generator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
assert.ErrorIs(t, err, ErrSCMProvidersDisabled)
}

View File

@@ -1,5 +0,0 @@
package generators
type SCMGeneratorWithCustomApiUrl interface {
CustomApiUrl() string
}
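This small interface is what enables the single up-front allowlist check: every provider config type (GitHub, GitLab, Gitea, Bitbucket Server, Azure DevOps) reports its custom API URL through one method. A sketch of the pattern with simplified config types:

package generators

// Any generator config that can report a custom API URL satisfies this.
type SCMGeneratorWithCustomApiUrl interface {
	CustomApiUrl() string
}

// Simplified provider configs; the real types carry many more fields.
type githubConfig struct{ API string }
type gitlabConfig struct{ API string }

func (c githubConfig) CustomApiUrl() string { return c.API }
func (c gitlabConfig) CustomApiUrl() string { return c.API }

// One check covers every provider instead of repeating the lookup per type.
func allowed(g SCMGeneratorWithCustomApiUrl, allowlist []string) bool {
	url := g.CustomApiUrl()
	if url == "" || len(allowlist) == 0 {
		return true
	}
	for _, a := range allowlist {
		if url == a {
			return true
		}
	}
	return false
}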

View File

@@ -13,25 +13,25 @@ type Repos struct {
mock.Mock
}
// GetDirectories provides a mock function with given fields: ctx, repoURL, revision
func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
ret := _m.Called(ctx, repoURL, revision)
// GetDirectories provides a mock function with given fields: ctx, repoURL, revision, noRevisionCache
func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error) {
ret := _m.Called(ctx, repoURL, revision, noRevisionCache)
var r0 []string
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok {
return rf(ctx, repoURL, revision)
if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) ([]string, error)); ok {
return rf(ctx, repoURL, revision, noRevisionCache)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok {
r0 = rf(ctx, repoURL, revision)
if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) []string); ok {
r0 = rf(ctx, repoURL, revision, noRevisionCache)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
r1 = rf(ctx, repoURL, revision)
if rf, ok := ret.Get(1).(func(context.Context, string, string, bool) error); ok {
r1 = rf(ctx, repoURL, revision, noRevisionCache)
} else {
r1 = ret.Error(1)
}
@@ -39,25 +39,25 @@ func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision st
return r0, r1
}
// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern
func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
ret := _m.Called(ctx, repoURL, revision, pattern)
// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern, noRevisionCache
func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error) {
ret := _m.Called(ctx, repoURL, revision, pattern, noRevisionCache)
var r0 map[string][]byte
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (map[string][]byte, error)); ok {
return rf(ctx, repoURL, revision, pattern)
if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) (map[string][]byte, error)); ok {
return rf(ctx, repoURL, revision, pattern, noRevisionCache)
}
if rf, ok := ret.Get(0).(func(context.Context, string, string, string) map[string][]byte); ok {
r0 = rf(ctx, repoURL, revision, pattern)
if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) map[string][]byte); ok {
r0 = rf(ctx, repoURL, revision, pattern, noRevisionCache)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[string][]byte)
}
}
if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
r1 = rf(ctx, repoURL, revision, pattern)
if rf, ok := ret.Get(1).(func(context.Context, string, string, string, bool) error); ok {
r1 = rf(ctx, repoURL, revision, pattern, noRevisionCache)
} else {
r1 = ret.Error(1)
}
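The file above is generated by mockery (note the go:generate directive in the next file); _m.Called records the invocation and returns whatever the test stubbed via .On(...).Return(...). When a parameter is added to the interface, every Called and On argument list must grow by one, which is exactly the mechanical change in these hunks. A trimmed hand-written equivalent:

package mocks

import (
	"context"

	"github.com/stretchr/testify/mock"
)

// Repos is a trimmed, hand-written version of the generated mock above.
type Repos struct {
	mock.Mock
}

func (m *Repos) GetDirectories(ctx context.Context, repoURL, revision string, noRevisionCache bool) ([]string, error) {
	// Called must receive exactly the arguments the expectation was set with;
	// forgetting a newly added parameter here is the usual cause of
	// "mock: I don't know what to return" panics after an interface change.
	args := m.Called(ctx, repoURL, revision, noRevisionCache)
	var dirs []string
	if v := args.Get(0); v != nil {
		dirs = v.([]string)
	}
	return dirs, args.Error(1)
}

In a test this is wired up as m.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"apps"}, nil).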

View File

@@ -11,6 +11,8 @@ import (
"github.com/argoproj/argo-cd/v2/util/io"
)
//go:generate go run github.com/vektra/mockery/v2@v2.25.1 --name=RepositoryDB
// RepositoryDB Is a lean facade for ArgoDB,
// Using a lean interface makes it easier to test the functionality of the git generator
type RepositoryDB interface {
@@ -25,13 +27,15 @@ type argoCDService struct {
newFileGlobbingEnabled bool
}
//go:generate go run github.com/vektra/mockery/v2@v2.25.1 --name=Repos
type Repos interface {
// GetFiles returns content of files (not directories) within the target repo
GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error)
GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error)
// GetDirectories returns a list of directories (not files) within the target repo
GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error)
GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error)
}
func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclient.Clientset, newFileGlobbingEnabled bool) (Repos, error) {
@@ -43,7 +47,7 @@ func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclie
}, nil
}
func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error) {
repo, err := a.repositoriesDB.GetRepository(ctx, repoURL)
if err != nil {
return nil, fmt.Errorf("error in GetRepository: %w", err)
@@ -55,6 +59,7 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
Revision: revision,
Path: pattern,
NewGitFileGlobbingEnabled: a.newFileGlobbingEnabled,
NoRevisionCache: noRevisionCache,
}
closer, client, err := a.repoServerClientSet.NewRepoServerClient()
if err != nil {
@@ -69,7 +74,7 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
return fileResponse.GetMap(), nil
}
func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error) {
repo, err := a.repositoriesDB.GetRepository(ctx, repoURL)
if err != nil {
return nil, fmt.Errorf("error in GetRepository: %w", err)
@@ -79,6 +84,7 @@ func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revi
Repo: repo,
SubmoduleEnabled: a.submoduleEnabled,
Revision: revision,
NoRevisionCache: noRevisionCache,
}
closer, client, err := a.repoServerClientSet.NewRepoServerClient()

View File

@@ -25,9 +25,10 @@ func TestGetDirectories(t *testing.T) {
repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient)
}
type args struct {
ctx context.Context
repoURL string
revision string
ctx context.Context
repoURL string
revision string
noRevisionCache bool
}
tests := []struct {
name string
@@ -88,11 +89,11 @@ func TestGetDirectories(t *testing.T) {
submoduleEnabled: tt.fields.submoduleEnabled,
repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient},
}
got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision)
if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)) {
got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)
if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)) {
return
}
assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)
assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)
})
}
}
@@ -105,10 +106,11 @@ func TestGetFiles(t *testing.T) {
repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient)
}
type args struct {
ctx context.Context
repoURL string
revision string
pattern string
ctx context.Context
repoURL string
revision string
pattern string
noRevisionCache bool
}
tests := []struct {
name string
@@ -175,11 +177,11 @@ func TestGetFiles(t *testing.T) {
submoduleEnabled: tt.fields.submoduleEnabled,
repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient},
}
got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)
if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)) {
got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)
if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)) {
return
}
assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)
assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)
})
}
}

View File

@@ -180,7 +180,7 @@ func secretToCluster(s *corev1.Secret) (*appv1.Cluster, error) {
if val, err := strconv.Atoi(string(shardStr)); err != nil {
log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err)
} else {
shard = pointer.Int64(int64(val))
shard = pointer.Int64Ptr(int64(val))
}
}
cluster := appv1.Cluster{
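The only difference in this hunk is the helper's name: in k8s.io/utils/pointer, Int64Ptr is the older, deprecated alias of Int64, and both return a *int64. For example:

package main

import "k8s.io/utils/pointer"

func main() {
	shard := pointer.Int64(3) // equivalent to the deprecated pointer.Int64Ptr(3)
	_ = shard
}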

View File

@@ -234,7 +234,7 @@
},
{
"type": "string",
"description": "forces application reconciliation if set to 'hard'.",
"description": "forces application reconciliation if set to true.",
"name": "refresh",
"in": "query"
},
@@ -573,7 +573,7 @@
},
{
"type": "string",
"description": "forces application reconciliation if set to 'hard'.",
"description": "forces application reconciliation if set to true.",
"name": "refresh",
"in": "query"
},
@@ -3816,7 +3816,7 @@
},
{
"type": "string",
"description": "forces application reconciliation if set to 'hard'.",
"description": "forces application reconciliation if set to true.",
"name": "refresh",
"in": "query"
},
@@ -4462,9 +4462,6 @@
"clientID": {
"type": "string"
},
"enablePKCEAuthentication": {
"type": "boolean"
},
"idTokenClaims": {
"type": "object",
"additionalProperties": {
@@ -5092,7 +5089,7 @@
}
},
"runtimeRawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this:\n{\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
"type": "object",
"properties": {
"raw": {
@@ -5499,6 +5496,10 @@
"type": "string"
}
},
"clusterName": {
"description": "Deprecated: ClusterName is a legacy field that was always cleared by\nthe system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect\naccidental use.\n\n+optional",
"type": "string"
},
"creationTimestamp": {
"$ref": "#/definitions/v1Time"
},
@@ -5570,8 +5571,8 @@
}
},
"v1ObjectReference": {
"description": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
"type": "object",
"title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
"properties": {
"apiVersion": {
"type": "string",
@@ -6395,13 +6396,6 @@
"type": "string"
}
},
"components": {
"type": "array",
"title": "Components specifies a list of kustomize components to add to the kustomization before building",
"items": {
"type": "string"
}
},
"forceCommonAnnotations": {
"type": "boolean",
"title": "ForceCommonAnnotations specifies whether to force applying common annotations to resources for Kustomize apps"

View File

@@ -6,7 +6,6 @@ import (
"math"
"time"
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
"github.com/argoproj/pkg/stats"
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
@@ -46,11 +45,9 @@ const (
func NewCommand() *cobra.Command {
var (
workqueueRateLimit ratelimiter.AppControllerRateLimiterConfig
clientConfig clientcmd.ClientConfig
appResyncPeriod int64
appHardResyncPeriod int64
repoErrorGracePeriod int64
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
@@ -144,6 +141,7 @@ func NewCommand() *cobra.Command {
}))
kubectl := kubeutil.NewKubectl()
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
@@ -155,7 +153,6 @@ func NewCommand() *cobra.Command {
resyncDuration,
hardResyncDuration,
time.Duration(selfHealTimeoutSeconds)*time.Second,
time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
metricsCacheExpiration,
metricsAplicationLabels,
@@ -163,7 +160,6 @@ func NewCommand() *cobra.Command {
persistResourceHealth,
clusterFilter,
applicationNamespaces,
&workqueueRateLimit,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -190,7 +186,6 @@ func NewCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", int64(env.ParseDurationFromEnv("ARGOCD_RECONCILIATION_TIMEOUT", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application resync.")
command.Flags().Int64Var(&appHardResyncPeriod, "app-hard-resync", int64(env.ParseDurationFromEnv("ARGOCD_HARD_RECONCILIATION_TIMEOUT", defaultAppHardResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Time period in seconds for application hard resync.")
command.Flags().Int64Var(&repoErrorGracePeriod, "repo-error-grace-period-seconds", int64(env.ParseDurationFromEnv("ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Grace period in seconds for ignoring consecutive errors while communicating with repo server.")
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&statusProcessors, "status-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS", 20, 0, math.MaxInt32), "Number of application status processors")
@@ -210,15 +205,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from")
command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD")
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
// global queue rate limit config
command.Flags().Int64Var(&workqueueRateLimit.BucketSize, "wq-bucket-size", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_SIZE", 500, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket Size, default 500")
command.Flags().Int64Var(&workqueueRateLimit.BucketQPS, "wq-bucket-qps", env.ParseInt64FromEnv("WORKQUEUE_BUCKET_QPS", 50, 1, math.MaxInt64), "Set Workqueue Rate Limiter Bucket QPS, default 50")
// individual item rate limit config
// when WORKQUEUE_FAILURE_COOLDOWN is 0, per item rate limiting is disabled (default)
command.Flags().DurationVar(&workqueueRateLimit.FailureCoolDown, "wq-cooldown-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_FAILURE_COOLDOWN_NS", 0, 0, (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Cooldown duration in ns, default 0 (per item rate limiter disabled)")
command.Flags().DurationVar(&workqueueRateLimit.BaseDelay, "wq-basedelay-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_BASE_DELAY_NS", time.Millisecond.Nanoseconds(), time.Nanosecond.Nanoseconds(), (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Base Delay duration in nanoseconds, default 1000000 (1ms)")
command.Flags().DurationVar(&workqueueRateLimit.MaxDelay, "wq-maxdelay-ns", time.Duration(env.ParseInt64FromEnv("WORKQUEUE_MAX_DELAY_NS", time.Second.Nanoseconds(), 1*time.Millisecond.Nanoseconds(), (24*time.Hour).Nanoseconds())), "Set Workqueue Per Item Rate Limiter Max Delay duration in nanoseconds, default 1000000000 (1s)")
command.Flags().Float64Var(&workqueueRateLimit.BackoffFactor, "wq-backoff-factor", env.ParseFloat64FromEnv("WORKQUEUE_BACKOFF_FACTOR", 1.5, 0, math.MaxFloat64), "Set Workqueue Per Item Rate Limiter Backoff Factor, default is 1.5")
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
redisClient = client
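The workqueue flags above configure a global token bucket (bucket size and QPS) plus an optional per-item exponential backoff (base delay, max delay, backoff factor, cooldown). In client-go terms the usual way to combine the two is workqueue.NewMaxOfRateLimiter; a sketch under that assumption (Argo CD's own ratelimiter package presumably exists because client-go's exponential limiter hard-codes a backoff factor of 2):

package main

import (
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// newRateLimiter combines a global token bucket with a per-item exponential
// backoff; for any given item the larger of the two delays wins, matching the
// semantics the flag help above describes.
func newRateLimiter(bucketSize int, bucketQPS float64, baseDelay, maxDelay time.Duration) workqueue.RateLimiter {
	return workqueue.NewMaxOfRateLimiter(
		workqueue.NewItemExponentialFailureRateLimiter(baseDelay, maxDelay),
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(bucketQPS), bucketSize)},
	)
}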

View File

@@ -65,7 +65,6 @@ func NewCommand() *cobra.Command {
allowedScmProviders []string
globalPreservedAnnotations []string
globalPreservedLabels []string
enableScmProviders bool
)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
@@ -108,8 +107,8 @@ func NewCommand() *cobra.Command {
// If the applicationset-namespaces contains only one namespace it corresponds to the current namespace
if len(applicationSetNamespaces) == 1 {
watchedNamespace = (applicationSetNamespaces)[0]
} else if enableScmProviders && len(allowedScmProviders) == 0 {
log.Error("When enabling applicationset in any namespace using applicationset-namespaces, you must either set --enable-scm-providers=false or specify --allowed-scm-providers")
} else if len(allowedScmProviders) == 0 {
log.Error("When enabling applicationset in any namespace using applicationset-namespaces, allowed-scm-providers is required")
os.Exit(1)
}
@@ -163,9 +162,9 @@ func NewCommand() *cobra.Command {
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(mgr.GetClient(), ctx, k8sClient, namespace),
"Git": generators.NewGitGenerator(argoCDService),
"SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders, enableScmProviders),
"SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders),
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
"PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders, enableScmProviders),
"PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders),
"Plugin": generators.NewPluginGenerator(mgr.GetClient(), ctx, k8sClient, namespace),
}
@@ -248,8 +247,7 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&debugLog, "debug", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DEBUG", false), "Print debug logs. Takes precedence over loglevel")
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed custom SCM provider API URLs. This restriction does not apply to SCM or PR generators which do not accept a custom API URL. (Default: Empty = all)")
command.Flags().BoolVar(&enableScmProviders, "enable-scm-providers", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS", true), "Enable retrieving information from SCM providers, used by the SCM and PR generators (Default: true)")
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed scm providers. (Default: Empty = all)")
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
command.Flags().BoolVar(&enableNewGitFileGlobbing, "enable-new-git-file-globbing", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING", false), "Enable new globbing in Git files generator.")
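The guard in the hunk above refuses to watch ApplicationSets in arbitrary namespaces unless SCM access is restricted; one side of the diff also allows opting out by disabling SCM providers entirely. A minimal sketch of that validation (flag names taken from the diff, function name illustrative):

package main

import (
	"log"
	"os"
)

// validateScmFlags mirrors the guard above: when the controller watches more
// than one namespace, SCM/PR generators must either be limited to an
// allowlist or disabled outright.
func validateScmFlags(applicationSetNamespaces, allowedScmProviders []string, enableScmProviders bool) {
	if len(applicationSetNamespaces) > 1 && enableScmProviders && len(allowedScmProviders) == 0 {
		log.Print("when enabling applicationset in any namespace, set --enable-scm-providers=false or specify --allowed-scm-providers")
		os.Exit(1)
	}
}

func main() {
	// Exits: unrestricted SCM access across multiple namespaces is refused.
	validateScmFlags([]string{"argocd", "team-a"}, nil, true)
}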

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math"
"strings"
"time"
"github.com/argoproj/pkg/stats"
@@ -25,7 +26,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/kube"
"github.com/argoproj/argo-cd/v2/util/templates"
"github.com/argoproj/argo-cd/v2/util/tls"
traceutil "github.com/argoproj/argo-cd/v2/util/trace"
)
@@ -59,6 +59,7 @@ func NewCommand() *cobra.Command {
repoServerAddress string
dexServerAddress string
disableAuth bool
contentTypes string
enableGZip bool
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
cacheSrc func() (*servercache.Cache, error)
@@ -163,6 +164,11 @@ func NewCommand() *cobra.Command {
baseHRef = rootPath
}
var contentTypesList []string
if contentTypes != "" {
contentTypesList = strings.Split(contentTypes, ";")
}
argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
ListenPort: listenPort,
@@ -178,6 +184,7 @@ func NewCommand() *cobra.Command {
DexServerAddr: dexServerAddress,
DexTLSConfig: dexTlsConfig,
DisableAuth: disableAuth,
ContentTypes: contentTypesList,
EnableGZip: enableGZip,
TLSConfigCustomizer: tlsConfigCustomizer,
Cache: cache,
@@ -212,13 +219,6 @@ func NewCommand() *cobra.Command {
}
}
},
Example: templates.Examples(`
# Start the Argo CD API server with default settings
$ argocd-server
# Start the Argo CD API server on a custom port and enable tracing
$ argocd-server --port 8888 --otlp-address localhost:4317
`),
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
@@ -232,6 +232,7 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json"), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(cliName))
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
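The new --api-content-types flag is split on ';' into a list the API server enforces for non-GET requests, with an empty value allowing any content type. A minimal sketch of such a check as net/http middleware (the real enforcement lives elsewhere in the server package):

package main

import (
	"net/http"
	"strings"
)

// enforceContentTypes rejects non-GET requests whose Content-Type is not in
// the allowlist; an empty flag value means no restriction, matching the flag
// help text above.
func enforceContentTypes(next http.Handler, contentTypes string) http.Handler {
	var allowed []string
	if contentTypes != "" {
		allowed = strings.Split(contentTypes, ";")
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodGet && len(allowed) > 0 {
			ct := r.Header.Get("Content-Type")
			ok := false
			for _, a := range allowed {
				if strings.HasPrefix(ct, a) { // tolerate parameters like "; charset=utf-8"
					ok = true
					break
				}
			}
			if !ok {
				http.Error(w, "unsupported media type", http.StatusUnsupportedMediaType)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}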

View File

@@ -26,26 +26,12 @@ import (
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/localconfig"
sessionutil "github.com/argoproj/argo-cd/v2/util/session"
"github.com/argoproj/argo-cd/v2/util/templates"
)
func NewAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "account",
Short: "Manage account settings",
Example: templates.Examples(`
# List accounts
argocd account list
# Update the current user's password
argocd account update-password
# Can I sync any app?
argocd account can-i sync applications '*'
# Get User information
argocd account get-user-info
`),
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -157,13 +143,6 @@ func NewAccountGetUserInfoCommand(clientOpts *argocdclient.ClientOptions) *cobra
var command = &cobra.Command{
Use: "get-user-info",
Short: "Get user info",
Example: templates.Examples(`
# Get User information for the currently logged-in user (see 'argocd login')
argocd account get-user-info
# Get User information in yaml format
argocd account get-user-info -o yaml
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -48,87 +48,6 @@ func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
Example: `# List all clusters
$ argocd admin cluster list
# Add a new cluster
$ argocd admin cluster add my-cluster --name my-cluster --in-cluster-context
# Remove a cluster
argocd admin cluster remove my-cluster
# List all projects
$ argocd admin project list
# Create a new project
$ argocd admin project create my-project --src-namespace my-source-namespace --dest-namespace my-dest-namespace
# Update a project
$ argocd admin project update my-project --src-namespace my-updated-source-namespace --dest-namespace my-updated-dest-namespace
# Delete a project
$ argocd admin project delete my-project
# List all settings
$ argocd admin settings list
# Get the current settings
$ argocd admin settings get
# Update settings
$ argocd admin settings update --repository.resync --value 15
# List all applications
$ argocd admin app list
# Get application details
$ argocd admin app get my-app
# Sync an application
$ argocd admin app sync my-app
# Pause an application
$ argocd admin app pause my-app
# Resume an application
$ argocd admin app resume my-app
# List all repositories
$ argocd admin repo list
# Add a repository
$ argocd admin repo add https://github.com/argoproj/my-repo.git
# Remove a repository
$ argocd admin repo remove https://github.com/argoproj/my-repo.git
# Import an application from a YAML file
$ argocd admin app import -f my-app.yaml
# Export an application to a YAML file
$ argocd admin app export my-app -o my-exported-app.yaml
# Access the Argo CD web UI
$ argocd admin dashboard
# List notifications
$ argocd admin notification list
# Get notification details
$ argocd admin notification get my-notification
# Create a new notification
$ argocd admin notification create my-notification -f notification-config.yaml
# Update a notification
$ argocd admin notification update my-notification -f updated-notification-config.yaml
# Delete a notification
$ argocd admin notification delete my-notification
# Reset the initial admin password
$ argocd admin initial-password reset
`,
}
command.AddCommand(NewClusterCommand(clientOpts, pathOpts))
@@ -138,7 +57,7 @@ $ argocd admin initial-password reset
command.AddCommand(NewRepoCommand())
command.AddCommand(NewImportCommand())
command.AddCommand(NewExportCommand())
command.AddCommand(NewDashboardCommand())
command.AddCommand(NewDashboardCommand(clientOpts))
command.AddCommand(NewNotificationsCommand())
command.AddCommand(NewInitialPasswordCommand())

View File

@@ -45,16 +45,6 @@ func NewAppCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "app",
Short: "Manage applications configuration",
Example: `
# Compare results of two reconciliations and print diff
argocd admin app diff-reconcile-results APPNAME [flags]
# Generate declarative config for an application
argocd admin app generate-spec APPNAME
# Reconcile all applications and store reconciliation summary in the specified file
argocd admin app get-reconcile-results APPNAME
`,
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
},
@@ -384,7 +374,7 @@ func reconcileApplications(
)
appStateManager := controller.NewAppStateManager(
argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false, 0)
argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false)
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
if err != nil {
@@ -419,10 +409,7 @@ func reconcileApplications(
sources = append(sources, app.Spec.GetSource())
revisions = append(revisions, app.Spec.GetSource().TargetRevision)
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
if err != nil {
return nil, err
}
res := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
items = append(items, appReconcileResult{
Name: app.Name,
Conditions: app.Status.Conditions,
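
The two CompareAppState calls above differ in their signature: one returns only the comparison result, the other also returns an error that must be checked before the result is used. A minimal sketch of the error-returning calling convention (the types are illustrative):

package main

import (
	"errors"
	"fmt"
)

type comparisonResult struct {
	conditions []string
}

// compareAppState stands in for the error-returning variant of
// appStateManager.CompareAppState shown above.
func compareAppState(app string) (*comparisonResult, error) {
	if app == "" {
		return nil, errors.New("no application given")
	}
	return &comparisonResult{conditions: []string{"Synced"}}, nil
}

func main() {
	res, err := compareAppState("my-app")
	if err != nil {
		fmt.Println("compare failed:", err)
		return
	}
	fmt.Println(res.conditions)
}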

View File

@@ -124,7 +124,7 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
if replicas > 0 {
distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
distributionFunction(&cluster)
cluster.Shard = pointer.Int64(int64(clusterShard))
cluster.Shard = pointer.Int64Ptr(int64(clusterShard))
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
}
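
Both lines build a *int64 from the computed shard; they differ only in the name of the k8s.io/utils/pointer helper (Int64Ptr is the older spelling, later deprecated in favor of Int64). A dependency-free generic equivalent of either helper:

package main

import "fmt"

// ptr returns a pointer to v; the generic shape of the
// pointer.Int64 / pointer.Int64Ptr helpers used above.
func ptr[T any](v T) *T {
	return &v
}

func main() {
	clusterShard := 2
	shard := ptr(int64(clusterShard)) // cf. cluster.Shard = pointer.Int64(int64(clusterShard))
	fmt.Println(*shard)
}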

View File

@@ -3,7 +3,9 @@ package admin
import (
"fmt"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
@@ -14,11 +16,12 @@ import (
"github.com/argoproj/argo-cd/v2/util/errors"
)
func NewDashboardCommand() *cobra.Command {
func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
port int
address string
compressionStr string
clientConfig clientcmd.ClientConfig
)
cmd := &cobra.Command{
Use: "dashboard",
@@ -28,21 +31,13 @@ func NewDashboardCommand() *cobra.Command {
compression, err := cache.CompressionTypeFromString(compressionStr)
errors.CheckError(err)
errors.CheckError(headless.MaybeStartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression))
clientOpts.Core = true
errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression, clientConfig))
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
<-ctx.Done()
},
Example: `# Start the Argo CD Web UI locally on the default port and address
$ argocd admin dashboard
# Start the Argo CD Web UI locally on a custom port and address
$ argocd admin dashboard --port 8080 --address 127.0.0.1
# Start the Argo CD Web UI with GZip compression
$ argocd admin dashboard --redis-compress gzip
`,
}
initialize.InitCommand(cmd)
clientConfig = cli.AddKubectlFlagsToSet(cmd.Flags())
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address")
cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
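
NewDashboardCommand now takes the shared client options from its caller instead of building them internally, and Run flips Core to true before starting the local server. A stripped-down sketch of that constructor-injection shape (ClientOptions here is a stand-in for argocdclient.ClientOptions):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// ClientOptions stands in for argocdclient.ClientOptions; only the
// field used below is sketched.
type ClientOptions struct {
	Core bool
}

// newDashboardCommand mirrors the constructor-injection shape above:
// shared options are passed in and mutated inside Run.
func newDashboardCommand(clientOpts *ClientOptions) *cobra.Command {
	var port int
	cmd := &cobra.Command{
		Use: "dashboard",
		Run: func(cmd *cobra.Command, args []string) {
			clientOpts.Core = true // force core (local API server) mode
			fmt.Printf("would start the local server on port %d\n", port)
		},
	}
	cmd.Flags().IntVar(&port, "port", 8080, "Listen on given port")
	return cmd
}

func main() {
	opts := &ClientOptions{}
	root := newDashboardCommand(opts)
	root.SetArgs([]string{"--port", "8081"})
	_ = root.Execute()
}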

View File

@@ -14,7 +14,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/templates"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/spf13/cobra"
@@ -48,17 +47,6 @@ func NewGenProjectSpecCommand() *cobra.Command {
var command = &cobra.Command{
Use: "generate-spec PROJECT",
Short: "Generate declarative config for a project",
Example: templates.Examples(`
# Generate a YAML configuration for a project named "myproject"
argocd admin projects generate-spec myproject
# Generate a JSON configuration for a project named "anotherproject" and specify an output file
argocd admin projects generate-spec anotherproject --output json --file config.json
# Generate a YAML configuration for a project named "someproject" and write it back to the input file
argocd admin projects generate-spec someproject --inline
`),
Run: func(c *cobra.Command, args []string) {
proj, err := cmdutil.ConstructAppProj(fileURL, args, opts, c)
errors.CheckError(err)

View File

@@ -41,8 +41,6 @@ func NewProjectAllowListGenCommand() *cobra.Command {
var command = &cobra.Command{
Use: "generate-allow-list CLUSTERROLE_PATH PROJ_NAME",
Short: "Generates project allow list from the specified clusterRole file",
Example: `# Generates project allow list from the specified clusterRole file
argocd admin proj generate-allow-list /path/to/clusterrole.yaml my-project`,
Run: func(c *cobra.Command, args []string) {
if len(args) != 2 {
c.HelpFunc()(c, args)

View File

@@ -189,6 +189,7 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
command.Flags().StringVar(&defaultRole, "default-role", "", "name of the default role to use")
@@ -201,55 +202,24 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
// NewRBACValidateCommand returns a new rbac validate command
func NewRBACValidateCommand() *cobra.Command {
var (
policyFile string
namespace string
clientConfig clientcmd.ClientConfig
policyFile string
)
var command = &cobra.Command{
Use: "validate [--policy-file POLICYFILE] [--namespace NAMESPACE]",
Use: "validate --policy-file=POLICYFILE",
Short: "Validate RBAC policy",
Long: `
Validates an RBAC policy for being syntactically correct. The policy must be
a local file or a K8s ConfigMap in the provided namespace, and in either CSV or K8s ConfigMap format.
`,
Example: `
# Check whether a given policy file is valid using a local policy.csv file.
argocd admin settings rbac validate --policy-file policy.csv
# Policy file can also be K8s config map with data keys like argocd-rbac-cm,
# i.e. 'policy.csv' and (optionally) 'policy.default'
argocd admin settings rbac validate --policy-file argocd-rbac-cm.yaml
# If --policy-file is not given and --namespace is given, the ConfigMap 'argocd-rbac-cm'
# from K8s is used.
argocd admin settings rbac validate --namespace argocd
# Either --policy-file or --namespace must be given.
a local file, and in either CSV or K8s ConfigMap format.
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
if len(args) > 0 {
if policyFile == "" {
c.HelpFunc()(c, args)
log.Fatalf("too many arguments")
log.Fatalf("Please specify policy to validate using --policy-file")
}
if (namespace == "" && policyFile == "") || (namespace != "" && policyFile != "") {
c.HelpFunc()(c, args)
log.Fatalf("please provide exactly one of --policy-file or --namespace")
}
restConfig, err := clientConfig.ClientConfig()
if err != nil {
log.Fatalf("could not get config to create k8s client: %v", err)
}
realClientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
log.Fatalf("could not create k8s client: %v", err)
}
userPolicy, _, _ := getPolicy(ctx, policyFile, realClientset, namespace)
userPolicy, _, _ := getPolicy(ctx, policyFile, nil, "")
if userPolicy != "" {
if err := rbac.ValidatePolicy(userPolicy); err == nil {
fmt.Printf("Policy is valid.\n")
@@ -258,15 +228,11 @@ argocd admin settings rbac validate --namespace argocd
fmt.Printf("Policy is invalid: %v\n", err)
os.Exit(1)
}
} else {
log.Fatalf("Policy is empty or could not be loaded.")
}
},
}
clientConfig = cli.AddKubectlFlagsToCmd(command)
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
command.Flags().StringVar(&namespace, "namespace", "", "namespace to get argo rbac configmap from")
command.Flags().StringVar(&policyFile, "policy-file", "", "path to the policy file to use")
return command
}
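
The validate command accepts its policy either from a local file or from a ConfigMap in a namespace, and the hunk above rejects invocations that supply both sources or neither. That exactly-one-of check can be written compactly, as in this sketch:

package main

import (
	"fmt"
	"os"
)

// validateFlags mirrors the exactly-one-of check above: the caller
// must supply --policy-file or --namespace, but not both.
func validateFlags(policyFile, namespace string) error {
	if (policyFile == "") == (namespace == "") {
		return fmt.Errorf("please provide exactly one of --policy-file or --namespace")
	}
	return nil
}

func main() {
	if err := validateFlags("policy.csv", ""); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("flags ok")
}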

View File

@@ -5,42 +5,15 @@ import (
"os"
"testing"
"github.com/argoproj/argo-cd/v2/util/assets"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/argoproj/argo-cd/v2/util/assets"
)
type FakeClientConfig struct {
clientConfig clientcmd.ClientConfig
}
func NewFakeClientConfig(clientConfig clientcmd.ClientConfig) *FakeClientConfig {
return &FakeClientConfig{clientConfig: clientConfig}
}
func (f *FakeClientConfig) RawConfig() (clientcmdapi.Config, error) {
config, err := f.clientConfig.RawConfig()
return config, err
}
func (f *FakeClientConfig) ClientConfig() (*restclient.Config, error) {
return f.clientConfig.ClientConfig()
}
func (f *FakeClientConfig) Namespace() (string, bool, error) {
return f.clientConfig.Namespace()
}
func (f *FakeClientConfig) ConfigAccess() clientcmd.ConfigAccess {
return nil
}
func Test_isValidRBACAction(t *testing.T) {
for k := range validRBACActions {
t.Run(k, func(t *testing.T) {
@@ -227,19 +200,3 @@ p, role:, certificates, get, .*, allow`
require.True(t, ok)
})
}
func TestNewRBACCanCommand(t *testing.T) {
command := NewRBACCanCommand()
require.NotNil(t, command)
assert.Equal(t, "can", command.Name())
assert.Equal(t, "Check RBAC permissions for a role or subject", command.Short)
}
func TestNewRBACValidateCommand(t *testing.T) {
command := NewRBACValidateCommand()
require.NotNil(t, command)
assert.Equal(t, "validate", command.Name())
assert.Equal(t, "Validate RBAC policy", command.Short)
}

View File

@@ -50,7 +50,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/grpc"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/manifeststream"
"github.com/argoproj/argo-cd/v2/util/templates"
"github.com/argoproj/argo-cd/v2/util/text/label"
)
@@ -318,35 +317,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
var command = &cobra.Command{
Use: "get APPNAME",
Short: "Get application details",
Example: templates.Examples(`
# Get basic details about the application "my-app" in wide format
argocd app get my-app -o wide
# Get detailed information about the application "my-app" in YAML format
argocd app get my-app -o yaml
# Get details of the application "my-app" in JSON format
argocd app get my-app -o json
# Get application details and include information about the current operation
argocd app get my-app --show-operation
# Show application parameters and overrides
argocd app get my-app --show-params
# Refresh application data when retrieving
argocd app get my-app --refresh
# Perform a hard refresh, including refreshing application data and target manifests cache
argocd app get my-app --hard-refresh
# Get application details and display them in a tree format
argocd app get my-app --output tree
# Get application details and display them in a detailed tree format
argocd app get my-app --output tree=detailed
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
if len(args) == 0 {
@@ -431,44 +401,6 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
var command = &cobra.Command{
Use: "logs APPNAME",
Short: "Get logs of application pods",
Example: templates.Examples(`
# Get logs of pods associated with the application "my-app"
argocd app logs my-app
# Get logs of pods associated with the application "my-app" in a specific resource group
argocd app logs my-app --group my-group
# Get logs of pods associated with the application "my-app" in a specific resource kind
argocd app logs my-app --kind my-kind
# Get logs of pods associated with the application "my-app" in a specific namespace
argocd app logs my-app --namespace my-namespace
# Get logs of pods associated with the application "my-app" for a specific resource name
argocd app logs my-app --name my-resource
# Stream logs in real-time for the application "my-app"
argocd app logs my-app -f
# Get the last N lines of logs for the application "my-app"
argocd app logs my-app --tail 100
# Get logs since a specified number of seconds ago
argocd app logs my-app --since-seconds 3600
# Get logs until a specified time (format: "2023-10-10T15:30:00Z")
argocd app logs my-app --until-time "2023-10-10T15:30:00Z"
# Filter logs to show only those containing a specific string
argocd app logs my-app --filter "error"
# Get logs for a specific container within the pods
argocd app logs my-app -c my-container
# Get previously terminated container logs
argocd app logs my-app -p
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -524,8 +456,8 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
} else {
return
}
} // Done with receive message
} // Done with retry
} //Done with receive message
} //Done with retry
},
}
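
The hunk above only retouches the comments marking the two loops that drive log streaming: an inner loop that receives messages until the stream ends, wrapped in an outer retry loop. A self-contained sketch of the inner loop's clean-EOF handling:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// drain sketches the inner "receive message" loop: keep reading until
// EOF, which ends the stream cleanly; any other error is surfaced to
// the outer retry loop.
func drain(r io.Reader) error {
	buf := make([]byte, 8)
	for {
		n, err := r.Read(buf)
		fmt.Print(string(buf[:n]))
		if errors.Is(err, io.EOF) {
			return nil // done with receive message
		}
		if err != nil {
			return err // let the retry loop decide
		}
	}
}

func main() {
	if err := drain(strings.NewReader("streamed log lines\n")); err != nil {
		fmt.Println("stream error:", err)
	}
}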
@@ -716,23 +648,6 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
var command = &cobra.Command{
Use: "set APPNAME",
Short: "Set application parameters",
Example: templates.Examples(`
# Set application parameters for the application "my-app"
argocd app set my-app --parameter key1=value1 --parameter key2=value2
# Set and validate application parameters for "my-app"
argocd app set my-app --parameter key1=value1 --parameter key2=value2 --validate
# Set and override application parameters with JSON or YAML file
argocd app set my-app --from-file path/to/parameters.json
# Set and override application parameters with a parameter file
argocd app set my-app --parameter-file path/to/parameter-file.yaml
# Set application parameters and specify the namespace
argocd app set my-app --parameter key1=value1 --parameter key2=value2 --namespace my-namespace
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -889,7 +804,7 @@ func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, n
for i, item := range source.Kustomize.Images {
if argoappv1.KustomizeImage(kustomizeImage).Match(item) {
updated = true
// remove i
//remove i
a := source.Kustomize.Images
copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index.
a[len(a)-1] = "" // Erase last element (write zero value).
@@ -1116,6 +1031,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
defer argoio.Close(conn)
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
errors.CheckError(err)
diffOption.local = local
diffOption.localRepoRoot = localRepoRoot
diffOption.cluster = cluster
@@ -1904,7 +1820,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
Backoff: &argoappv1.Backoff{
Duration: retryBackoffDuration.String(),
MaxDuration: retryBackoffMaxDuration.String(),
Factor: pointer.Int64(retryBackoffFactor),
Factor: pointer.Int64Ptr(retryBackoffFactor),
},
}
}
@@ -2143,7 +2059,7 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string
} else if watch.degraded && watch.health {
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
healthStatus == string(health.HealthStatusDegraded)
// below are good
//below are good
} else if watch.suspended && watch.health {
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
healthStatus == string(health.HealthStatusSuspended)
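
checkResourceStatus widens the set of health states that count as passing according to which watch flags the caller set: --degraded admits Degraded alongside Healthy, and --suspended admits Suspended. A table-style sketch of that predicate (the fallback branch is an assumption, not taken from the hunk):

package main

import "fmt"

type watchOpts struct {
	health, degraded, suspended bool
}

// passed mirrors the branch structure in checkResourceStatus above:
// each extra watch flag widens the set of health states that count.
func passed(w watchOpts, status string) bool {
	switch {
	case w.degraded && w.health:
		return status == "Healthy" || status == "Degraded"
	case w.suspended && w.health:
		return status == "Healthy" || status == "Suspended"
	case w.health:
		return status == "Healthy"
	default:
		return true // health not watched; assumed fallback
	}
}

func main() {
	fmt.Println(passed(watchOpts{health: true, degraded: true}, "Degraded")) // true
	fmt.Println(passed(watchOpts{health: true}, "Degraded"))                 // false
}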

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/argoproj/argo-cd/v2/util/templates"
"os"
"strconv"
"text/tabwriter"
@@ -34,22 +33,11 @@ type DisplayedAction struct {
Disabled bool
}
var (
appActionExample = templates.Examples(`
# List all the available actions for an application
argocd app actions list APPNAME
# Run an available action for an application
argocd app actions run APPNAME ACTION --kind KIND [--resource-name RESOURCE] [--namespace NAMESPACE] [--group GROUP]
`)
)
// NewApplicationResourceActionsCommand returns a new instance of an `argocd app actions` command
func NewApplicationResourceActionsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "actions",
Short: "Manage Resource actions",
Example: appActionExample,
Use: "actions",
Short: "Manage Resource actions",
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -70,10 +58,6 @@ func NewApplicationResourceActionsListCommand(clientOpts *argocdclient.ClientOpt
var command = &cobra.Command{
Use: "list APPNAME",
Short: "Lists available actions on a resource",
Example: templates.Examples(`
# List all the available actions for an application
argocd app actions list APPNAME
`),
}
command.Run = func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -152,10 +136,6 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti
var command = &cobra.Command{
Use: "run APPNAME ACTION",
Short: "Runs an available action on resource(s)",
Example: templates.Examples(`
# Run an available action for an application
argocd app actions run APPNAME ACTION --kind KIND [--resource-name RESOURCE] [--namespace NAMESPACE] [--group GROUP]
`),
}
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource")

View File

@@ -3,7 +3,6 @@ package commands
import (
"fmt"
"os"
"text/tabwriter"
"github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
@@ -19,6 +18,8 @@ import (
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"text/tabwriter"
)
func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
@@ -29,7 +30,6 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
var kind string
var group string
var all bool
var project string
command := &cobra.Command{
Use: "patch-resource APPNAME",
Short: "Patch resource in an application",
@@ -46,7 +46,6 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
command.Flags().StringVar(&group, "group", "", "Group")
command.Flags().StringVar(&namespace, "namespace", "", "Namespace")
command.Flags().BoolVar(&all, "all", false, "Indicates whether to patch multiple matching resources")
command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
command.Run = func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -78,7 +77,6 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
Kind: pointer.String(gvk.Kind),
Patch: pointer.String(patch),
PatchType: pointer.String(patchType),
Project: pointer.String(project),
})
errors.CheckError(err)
log.Infof("Resource '%s' patched", obj.GetName())
@@ -96,7 +94,6 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
var force bool
var orphan bool
var all bool
var project string
command := &cobra.Command{
Use: "delete-resource APPNAME",
Short: "Delete resource in an application",
@@ -111,7 +108,6 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
command.Flags().BoolVar(&force, "force", false, "Indicates whether to force delete the resource")
command.Flags().BoolVar(&orphan, "orphan", false, "Indicates whether to orphan the dependents of the deleted resource")
command.Flags().BoolVar(&all, "all", false, "Indicates whether to delete multiple matching resources")
command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
command.Run = func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -143,7 +139,6 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
Kind: pointer.String(gvk.Kind),
Force: &force,
Orphan: &orphan,
Project: pointer.String(project),
})
errors.CheckError(err)
log.Infof("Resource '%s' deleted", obj.GetName())
@@ -255,7 +250,6 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var orphaned bool
var output string
var project string
var command = &cobra.Command{
Use: "resources APPNAME",
Short: "List resource of application",
@@ -272,7 +266,6 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
appResourceTree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{
ApplicationName: &appName,
AppNamespace: &appNs,
Project: &project,
})
errors.CheckError(err)
printResources(listAll, orphaned, appResourceTree, output)
@@ -280,6 +273,5 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
}
command.Flags().BoolVar(&orphaned, "orphaned", false, "Lists only orphaned resources")
command.Flags().StringVar(&output, "output", "", "Provides the tree view of the resources")
command.Flags().StringVar(&project, "project", "", `The name of the application's project - specifying this allows the command to report "not found" instead of "permission denied" if the app does not exist`)
return command
}

View File

@@ -67,10 +67,6 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
var command = &cobra.Command{
Use: "get APPSETNAME",
Short: "Get ApplicationSet details",
Example: templates.Examples(`
# Get ApplicationSets
argocd appset get APPSETNAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -15,9 +15,7 @@ func NewBcryptCmd() *cobra.Command {
)
var bcryptCmd = &cobra.Command{
Use: "bcrypt",
Short: "Generate bcrypt hash for any password",
Example: `# Generate bcrypt hash for any password
argocd account bcrypt --password YOUR_PASSWORD`,
Short: "Generate bcrypt hash for the admin password",
Run: func(cmd *cobra.Command, args []string) {
bytePassword := []byte(password)
// Hashing the password

View File

@@ -485,23 +485,6 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
}
},
Example: `
# List Clusters in Default "Wide" Format
argocd cluster list
# List Clusters by specifying the server
argocd cluster list --server <ARGOCD_SERVER_ADDRESS>
# List Clusters in JSON Format
argocd cluster list -o json --server <ARGOCD_SERVER_ADDRESS>
# List Clusters in YAML Format
argocd cluster list -o yaml --server <ARGOCD_SERVER_ADDRESS>
# List Clusters that have been added to your Argo CD
argocd cluster list -o server <ARGOCD_SERVER_ADDRESS>
`,
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|server")
return command

View File

@@ -211,13 +211,6 @@ compdef _argocd argocd
Optionally, also add the following, in case you are getting errors involving compdef & compinit such as command not found: compdef:
autoload -Uz compinit
compinit
`,
Example: `# For bash
$ source <(argocd completion bash)
# For zsh
$ argocd completion zsh > _argocd
$ source _argocd
`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {

View File

@@ -22,14 +22,6 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
Use: "context [CONTEXT]",
Aliases: []string{"ctx"},
Short: "Switch between contexts",
Example: `# List Argo CD Contexts
argocd context
# Switch Argo CD context
argocd context cd.argoproj.io
# Delete Argo CD context
argocd context cd.argoproj.io --delete`,
Run: func(c *cobra.Command, args []string) {
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)

View File

@@ -14,7 +14,6 @@ import (
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/errors"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/templates"
)
// NewGPGCommand returns a new instance of an `argocd repo` command
@@ -43,17 +42,6 @@ func NewGPGListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "list",
Short: "List configured GPG public keys",
Example: templates.Examples(`
# List all configured GPG public keys in wide format (default).
argocd gpg list
# List all configured GPG public keys in JSON format.
argocd gpg list -o json
# List all configured GPG public keys in YAML format.
argocd gpg list -o yaml
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -84,17 +72,6 @@ func NewGPGGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "get KEYID",
Short: "Get the GPG public key with ID <KEYID> from the server",
Example: templates.Examples(`
# Get a GPG public key with the specified KEYID in wide format (default).
argocd gpg get KEYID
# Get a GPG public key with the specified KEYID in JSON format.
argocd gpg get KEYID -o json
# Get a GPG public key with the specified KEYID in YAML format.
argocd gpg get KEYID -o yaml
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -132,11 +109,6 @@ func NewGPGAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "add",
Short: "Adds a GPG public key to the server's keyring",
Example: templates.Examples(`
# Add a GPG public key to the server's keyring from a file.
argocd gpg add --from /path/to/keyfile
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -153,9 +153,11 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
//
// If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
// not start the local server.
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error {
flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
clientConfig := cli.AddKubectlFlagsToSet(flags)
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType, clientConfig clientcmd.ClientConfig) error {
if clientConfig == nil {
flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
clientConfig = cli.AddKubectlFlagsToSet(flags)
}
startInProcessAPI := clientOpts.Core
if !startInProcessAPI {
// Core mode is enabled on client options. Check the local config to see if we should start the API server.
@@ -244,6 +246,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
if !cache2.WaitForCacheSync(ctx.Done(), srv.Initialized) {
log.Fatal("Timed out waiting for project cache to sync")
}
tries := 5
for i := 0; i < tries; i++ {
err = testAPI(ctx, clientOpts)
@@ -265,7 +268,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
// If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
// If we're not in core mode, this function call will do nothing.
err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone)
err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone, nil)
if err != nil {
log.Fatal(err)
}
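
MaybeStartLocalServer now accepts an optional clientConfig; a nil value means the function builds one itself from a throwaway kubectl FlagSet, so callers such as NewClientOrDie can simply pass nil. A minimal sketch of that nil-default parameter pattern (the types are illustrative):

package main

import "fmt"

type clientConfig struct{ context string }

// defaultClientConfig stands in for building a config from a
// throwaway kubectl FlagSet, as MaybeStartLocalServer does for nil.
func defaultClientConfig() *clientConfig {
	return &clientConfig{context: "from-default-kubeconfig-flags"}
}

// maybeStartLocalServer sketches the optional-parameter pattern:
// nil means "build your own", so callers with no kubectl flags wired
// up can simply pass nil.
func maybeStartLocalServer(cfg *clientConfig) {
	if cfg == nil {
		cfg = defaultClientConfig()
	}
	fmt.Println("using context:", cfg.context)
}

func main() {
	maybeStartLocalServer(nil)                        // CLI entrypoint path
	maybeStartLocalServer(&clientConfig{"dashboard"}) // admin dashboard path
}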

View File

@@ -18,10 +18,6 @@ func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comma
Use: "logout CONTEXT",
Short: "Log out from Argo CD",
Long: "Log out from Argo CD",
Example: `# To log out of argocd
$ argocd logout
# This can be helpful for security reasons or when you want to switch between different Argo CD contexts or accounts.
`,
Run: func(c *cobra.Command, args []string) {
if len(args) == 0 {
c.HelpFunc()(c, args)

View File

@@ -26,7 +26,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/git"
"github.com/argoproj/argo-cd/v2/util/gpg"
argoio "github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/templates"
)
type policyOpts struct {
@@ -40,19 +39,6 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "proj",
Short: "Manage projects",
Example: templates.Examples(`
# List all available projects
argocd proj list
# Create a new project with name PROJECT
argocd proj create PROJECT
# Delete the project with name PROJECT
argocd proj delete PROJECT
# Edit the information on project with name PROJECT
argocd proj edit PROJECT
`),
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -102,13 +88,6 @@ func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
var command = &cobra.Command{
Use: "create PROJECT",
Short: "Create a project",
Example: templates.Examples(`
# Create a new project with name PROJECT
argocd proj create PROJECT
# Create a new project with name PROJECT from a file or URL to a Kubernetes manifest
argocd proj create PROJECT -f FILE|URL
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -139,13 +118,6 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
var command = &cobra.Command{
Use: "set PROJECT",
Short: "Set project parameters",
Example: templates.Examples(`
# Set project parameters with some allowed cluster resources [RES1,RES2,...] for project with name PROJECT
argocd proj set PROJECT --allow-cluster-resource [RES1,RES2,...]
# Set project parameters with some denied namespaced resources [RES1,RES2,...] for project with name PROJECT
argocd proj set PROJECT --deny-namespaced-resource [RES1,RES2,...]
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -179,10 +151,6 @@ func NewProjectAddSignatureKeyCommand(clientOpts *argocdclient.ClientOptions) *c
var command = &cobra.Command{
Use: "add-signature-key PROJECT KEY-ID",
Short: "Add GnuPG signature key to project",
Example: templates.Examples(`
# Add GnuPG signature key KEY-ID to project PROJECT
argocd proj add-signature-key PROJECT KEY-ID
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -221,10 +189,6 @@ func NewProjectRemoveSignatureKeyCommand(clientOpts *argocdclient.ClientOptions)
var command = &cobra.Command{
Use: "remove-signature-key PROJECT KEY-ID",
Short: "Remove GnuPG signature key from project",
Example: templates.Examples(`
# Remove GnuPG signature key KEY-ID from project PROJECT
argocd proj remove-signature-key PROJECT KEY-ID
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -275,13 +239,6 @@ func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *co
var command = &cobra.Command{
Use: "add-destination PROJECT SERVER/NAME NAMESPACE",
Short: "Add project destination",
Example: templates.Examples(`
# Add project destination using a server URL (SERVER) in the specified namespace (NAMESPACE) on the project with name PROJECT
argocd proj add-destination PROJECT SERVER NAMESPACE
# Add project destination using a server name (NAME) in the specified namespace (NAMESPACE) on the project with name PROJECT
argocd proj add-destination PROJECT NAME NAMESPACE --name
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -319,10 +276,6 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
var command = &cobra.Command{
Use: "remove-destination PROJECT SERVER NAMESPACE",
Short: "Remove project destination",
Example: templates.Examples(`
# Remove the destination (SERVER) from the specified namespace (NAMESPACE) on the project with name PROJECT
argocd proj remove-destination PROJECT SERVER NAMESPACE
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -367,13 +320,6 @@ func NewProjectAddOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOptions)
var command = &cobra.Command{
Use: "add-orphaned-ignore PROJECT GROUP KIND",
Short: "Add a resource to orphaned ignore list",
Example: templates.Examples(`
# Add a resource of the specified GROUP and KIND to orphaned ignore list on the project with name PROJECT
argocd proj add-orphaned-ignore PROJECT GROUP KIND
# Add resources of the specified GROUP and KIND using a NAME pattern to orphaned ignore list on the project with name PROJECT
argocd proj add-orphaned-ignore PROJECT GROUP KIND --name NAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -417,15 +363,8 @@ func NewProjectRemoveOrphanedIgnoreCommand(clientOpts *argocdclient.ClientOption
name string
)
var command = &cobra.Command{
Use: "remove-orphaned-ignore PROJECT GROUP KIND",
Use: "remove-orphaned-ignore PROJECT GROUP KIND NAME",
Short: "Remove a resource from orphaned ignore list",
Example: templates.Examples(`
# Remove a resource of the specified GROUP and KIND from orphaned ignore list on the project with name PROJECT
argocd proj remove-orphaned-ignore PROJECT GROUP KIND
# Remove resources of the specified GROUP and KIND using a NAME pattern from orphaned ignore list on the project with name PROJECT
argocd proj remove-orphaned-ignore PROJECT GROUP KIND --name NAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -472,10 +411,6 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
var command = &cobra.Command{
Use: "add-source PROJECT URL",
Short: "Add project source repository",
Example: templates.Examples(`
# Add a source repository (URL) to the project with name PROJECT
argocd proj add-source PROJECT URL
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -538,7 +473,7 @@ func modifyResourcesList(list *[]metav1.GroupKind, add bool, listDesc string, gr
}
}
func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command {
func modifyResourceListCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.ClientOptions, allow bool, namespacedList bool) *cobra.Command {
var (
listType string
defaultList string
@@ -549,9 +484,8 @@ func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdc
defaultList = "allow"
}
var command = &cobra.Command{
Use: cmdUse,
Short: cmdDesc,
Example: templates.Examples(examples),
Use: cmdUse,
Short: cmdDesc,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -600,44 +534,28 @@ func modifyResourceListCmd(cmdUse, cmdDesc, examples string, clientOpts *argocdc
func NewProjectAllowNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-namespace-resource PROJECT GROUP KIND"
desc := "Removes a namespaced API resource from the deny list or add a namespaced API resource to the allow list"
examples := `
# Removes a namespaced API resource with specified GROUP and KIND from the deny list or add a namespaced API resource to the allow list for project PROJECT
argocd proj allow-namespace-resource PROJECT GROUP KIND
`
return modifyResourceListCmd(use, desc, examples, clientOpts, true, true)
return modifyResourceListCmd(use, desc, clientOpts, true, true)
}
// NewProjectDenyNamespaceResourceCommand returns a new instance of an `argocd proj deny-namespace-resource` command
func NewProjectDenyNamespaceResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-namespace-resource PROJECT GROUP KIND"
desc := "Adds a namespaced API resource to the deny list or removes a namespaced API resource from the allow list"
examples := `
# Adds a namespaced API resource with specified GROUP and KIND to the deny list or removes a namespaced API resource from the allow list for project PROJECT
argocd proj deny-namespace-resource PROJECT GROUP KIND
`
return modifyResourceListCmd(use, desc, examples, clientOpts, false, true)
return modifyResourceListCmd(use, desc, clientOpts, false, true)
}
// NewProjectDenyClusterResourceCommand returns a new instance of an `deny-cluster-resource` command
func NewProjectDenyClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "deny-cluster-resource PROJECT GROUP KIND"
desc := "Removes a cluster-scoped API resource from the allow list and adds it to deny list"
examples := `
# Removes a cluster-scoped API resource with specified GROUP and KIND from the allow list and adds it to deny list for project PROJECT
argocd proj deny-cluster-resource PROJECT GROUP KIND
`
return modifyResourceListCmd(use, desc, examples, clientOpts, false, false)
return modifyResourceListCmd(use, desc, clientOpts, false, false)
}
// NewProjectAllowClusterResourceCommand returns a new instance of an `argocd proj allow-cluster-resource` command
func NewProjectAllowClusterResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
use := "allow-cluster-resource PROJECT GROUP KIND"
desc := "Adds a cluster-scoped API resource to the allow list and removes it from deny list"
examples := `
# Adds a cluster-scoped API resource with specified GROUP and KIND to the allow list and removes it from deny list for project PROJECT
argocd proj allow-cluster-resource PROJECT GROUP KIND
`
return modifyResourceListCmd(use, desc, examples, clientOpts, true, false)
return modifyResourceListCmd(use, desc, clientOpts, true, false)
}
// NewProjectRemoveSourceCommand returns a new instance of an `argocd proj remove-src` command
@@ -645,10 +563,6 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr
var command = &cobra.Command{
Use: "remove-source PROJECT URL",
Short: "Remove project source repository",
Example: templates.Examples(`
# Remove URL source repository to project PROJECT
argocd proj remove-source PROJECT URL
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -689,10 +603,6 @@ func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
var command = &cobra.Command{
Use: "delete PROJECT",
Short: "Delete project",
Example: templates.Examples(`
# Delete the project with name PROJECT
argocd proj delete PROJECT
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -736,13 +646,6 @@ func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
var command = &cobra.Command{
Use: "list",
Short: "List projects",
Example: templates.Examples(`
# List all available projects
argocd proj list
# List all available projects in yaml format
argocd proj list -o yaml
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -908,14 +811,6 @@ func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
var command = &cobra.Command{
Use: "get PROJECT",
Short: "Get project details",
Example: templates.Examples(`
# Get details from project PROJECT
argocd proj get PROJECT
# Get details from project PROJECT in yaml format
argocd proj get PROJECT -o yaml
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -953,10 +848,6 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
var command = &cobra.Command{
Use: "edit PROJECT",
Short: "Edit project",
Example: templates.Examples(`
# Edit the information on project with name PROJECT
argocd proj edit PROJECT
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -18,7 +18,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/jwt"
"github.com/argoproj/argo-cd/v2/util/templates"
)
const (
@@ -57,30 +56,6 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob
var command = &cobra.Command{
Use: "add-policy PROJECT ROLE-NAME",
Short: "Add a policy to a project role",
Example: `# Before adding new policy
$ argocd proj role get test-project test-role
Role Name: test-role
Description:
Policies:
p, proj:test-project:test-role, projects, get, test-project, allow
JWT Tokens:
ID ISSUED-AT EXPIRES-AT
1696759698 2023-10-08T11:08:18+01:00 (3 hours ago) <none>
# Add a new policy to allow update to the project
$ argocd proj role add-policy test-project test-role -a update -p allow -o project
# Policy should be updated
$ argocd proj role get test-project test-role
Role Name: test-role
Description:
Policies:
p, proj:test-project:test-role, projects, get, test-project, allow
p, proj:test-project:test-role, applications, update, test-project/project, allow
JWT Tokens:
ID ISSUED-AT EXPIRES-AT
1696759698 2023-10-08T11:08:18+01:00 (3 hours ago) <none>
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -118,30 +93,6 @@ func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *
var command = &cobra.Command{
Use: "remove-policy PROJECT ROLE-NAME",
Short: "Remove a policy from a role within a project",
Example: `# List the policy of the test-role before removing a policy
$ argocd proj role get test-project test-role
Role Name: test-role
Description:
Policies:
p, proj:test-project:test-role, projects, get, test-project, allow
p, proj:test-project:test-role, applications, update, test-project/project, allow
JWT Tokens:
ID ISSUED-AT EXPIRES-AT
1696759698 2023-10-08T11:08:18+01:00 (3 hours ago) <none>
# Remove the policy to allow update to objects
$ argocd proj role remove-policy test-project test-role -a update -p allow -o project
# The role should be removed now.
$ argocd proj role get test-project test-role
Role Name: test-role
Description:
Policies:
p, proj:test-project:test-role, projects, get, test-project, allow
JWT Tokens:
ID ISSUED-AT EXPIRES-AT
1696759698 2023-10-08T11:08:18+01:00 (4 hours ago) <none>
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -189,11 +140,6 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
var command = &cobra.Command{
Use: "create PROJECT ROLE-NAME",
Short: "Create a project role",
Example: templates.Examples(`
# Create a project role in the "my-project" project with the name "my-role".
argocd proj role create my-project my-role --description "My project role description"
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -228,9 +174,8 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
// NewProjectRoleDeleteCommand returns a new instance of an `argocd proj role delete` command
func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ROLE-NAME",
Short: "Delete a project role",
Example: `$ argocd proj role delete test-project test-role`,
Use: "delete PROJECT ROLE-NAME",
Short: "Delete a project role",
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -278,15 +223,8 @@ func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *c
tokenID string
)
var command = &cobra.Command{
Use: "create-token PROJECT ROLE-NAME",
Short: "Create a project token",
Example: `$ argocd proj role create-token test-project test-role
Create token succeeded for proj:test-project:test-role.
ID: f316c466-40bd-4cfd-8a8c-1392e92255d4
Issued At: 2023-10-08T15:21:40+01:00
Expires At: Never
Token: xxx
`,
Use: "create-token PROJECT ROLE-NAME",
Short: "Create a project token",
Aliases: []string{"token-create"},
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -350,13 +288,8 @@ func NewProjectRoleListTokensCommand(clientOpts *argocdclient.ClientOptions) *co
useUnixTime bool
)
var command = &cobra.Command{
Use: "list-tokens PROJECT ROLE-NAME",
Short: "List tokens for a given role.",
Example: `$ argocd proj role list-tokens test-project test-role
ID ISSUED AT EXPIRES AT
f316c466-40bd-4cfd-8a8c-1392e92255d4 2023-10-08T15:21:40+01:00 Never
fa9d3517-c52d-434c-9bff-215b38508842 2023-10-08T11:08:18+01:00 Never
`,
Use: "list-tokens PROJECT ROLE-NAME",
Short: "List tokens for a given role.",
Aliases: []string{"list-token", "token-list"},
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -406,35 +339,8 @@ fa9d3517-c52d-434c-9bff-215b38508842 2023-10-08T11:08:18+01:00 Never
// NewProjectRoleDeleteTokenCommand returns a new instance of an `argocd proj role delete-token` command
func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
Short: "Delete a project token",
Example: `# Create project test-project
$ argocd proj create test-project
# Create a role associated with test-project
$ argocd proj role create test-project test-role
Role 'test-role' created
# Create a token for test-role associated with test-project
$ argocd proj role create-token test-project test-role
Create token succeeded for proj:test-project:test-role.
ID: c312450e-12e1-4e0d-9f65-fac9cb027b32
Issued At: 2023-10-08T13:58:57+01:00
Expires At: Never
Token: xxx
# Get test-role id to input into the delete-token command below
$ argocd proj role get test-project test-role
Role Name: test-role
Description:
Policies:
p, proj:test-project:test-role, projects, get, test-project, allow
JWT Tokens:
ID ISSUED-AT EXPIRES-AT
1696769937 2023-10-08T13:58:57+01:00 (6 minutes ago) <none>
$ argocd proj role delete-token test-project test-role 1696769937
`,
Use: "delete-token PROJECT ROLE-NAME ISSUED-AT",
Short: "Delete a project token",
Aliases: []string{"token-delete", "remove-token"},
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -483,15 +389,6 @@ func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List all the roles in a project",
Example: templates.Examples(`
# This command will list all the roles in argocd-project in a default table format.
argocd proj role list PROJECT
# List the roles in the project in formats like json, yaml, wide, or name.
argocd proj role list PROJECT --output json
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -527,16 +424,6 @@ func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
var command = &cobra.Command{
Use: "get PROJECT ROLE-NAME",
Short: "Get the details of a specific role",
Example: `$ argocd proj role get test-project test-role
Role Name: test-role
Description:
Policies:
p, proj:test-project:test-role, projects, get, test-project, allow
JWT Tokens:
ID ISSUED-AT EXPIRES-AT
1696774900 2023-10-08T15:21:40+01:00 (4 minutes ago) <none>
1696759698 2023-10-08T11:08:18+01:00 (4 hours ago) <none>
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -125,23 +125,6 @@ func NewProjectWindowsAddWindowCommand(clientOpts *argocdclient.ClientOptions) *
var command = &cobra.Command{
Use: "add PROJECT",
Short: "Add a sync window to a project",
Example: `# Add a 1 hour allow sync window
argocd proj windows add PROJECT \
--kind allow \
--schedule "0 22 * * *" \
--duration 1h \
--applications "*"
# Add a deny sync window with the ability to manually sync.
argocd proj windows add PROJECT \
--kind deny \
--schedule "30 10 * * *" \
--duration 30m \
--applications "prod-\\*,website" \
--namespaces "default,\\*-prod" \
--clusters "prod,staging" \
--manual-sync
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -175,7 +158,7 @@ argocd proj windows add PROJECT \
return command
}
// NewProjectWindowsDeleteCommand returns a new instance of an `argocd proj windows delete` command
// NewProjectWindowsAddWindowCommand returns a new instance of an `argocd proj windows delete` command
func NewProjectWindowsDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var command = &cobra.Command{
Use: "delete PROJECT ID",
@@ -222,10 +205,6 @@ func NewProjectWindowsUpdateCommand(clientOpts *argocdclient.ClientOptions) *cob
Use: "update PROJECT ID",
Short: "Update a project sync window",
Long: "Update a project sync window. Requires ID which can be found by running \"argocd proj windows list PROJECT\"",
Example: `# Change a sync window's schedule
argocd proj windows update PROJECT ID \
--schedule "0 20 * * *"
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -274,12 +253,6 @@ func NewProjectWindowsListCommand(clientOpts *argocdclient.ClientOptions) *cobra
var command = &cobra.Command{
Use: "list PROJECT",
Short: "List project sync windows",
Example: `# List project windows
argocd proj windows list PROJECT
# List project windows in yaml format
argocd proj windows list PROJECT -o yaml
`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -312,8 +285,8 @@ argocd proj windows list PROJECT -o yaml
func printSyncWindows(proj *v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var fmtStr string
headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC", "TIMEZONE"}
fmtStr = strings.Repeat("%s\t", len(headers)) + "\n"
headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "APPLICATIONS", "NAMESPACES", "CLUSTERS", "MANUALSYNC"}
fmtStr = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
fmt.Fprintf(w, fmtStr, headers...)
if proj.Spec.SyncWindows.HasWindows() {
for i, window := range proj.Spec.SyncWindows {
@@ -327,7 +300,6 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
formatListOutput(window.Namespaces),
formatListOutput(window.Clusters),
formatManualOutput(window.ManualSync),
window.TimeZone,
}
fmt.Fprintf(w, fmtStr, vals...)
}
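
Deriving the row format from the header count (the strings.Repeat variant above) keeps the format string and the header list from drifting apart when a column such as TIMEZONE is added or removed. A runnable reduction of printSyncWindows:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

// A trimmed-down printSyncWindows: one format string derived from the
// header slice, reused for every row.
func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	headers := []interface{}{"ID", "STATUS", "KIND", "SCHEDULE", "DURATION", "TIMEZONE"}
	fmtStr := strings.Repeat("%s\t", len(headers)) + "\n"
	fmt.Fprintf(w, fmtStr, headers...)
	fmt.Fprintf(w, fmtStr, "0", "Active", "allow", "0 22 * * *", "1h", "UTC")
	w.Flush()
}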

View File

@@ -84,18 +84,6 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
errors.CheckError(err)
fmt.Printf("Context '%s' updated\n", localCfg.CurrentContext)
},
Example: `
# Reinitiates the login with previous contexts
argocd relogin
# Reinitiates the login with password
argocd relogin --password YOUR_PASSWORD
# Configure direct access using Kubernetes API server
argocd login cd.argoproj.io --core
# If the user logged in via SSO using "argocd login cd.argoproj.io",
# "argocd relogin" reinitiates the SSO login and updates the server context`,
}
command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")

View File

@@ -29,19 +29,6 @@ func NewRepoCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
c.HelpFunc()(c, args)
os.Exit(1)
},
Example: `
# Add git repository connection parameters
argocd repo add git@git.example.com:repos/repo
# Get a Configured Repository by URL
argocd repo get https://github.com/yourusername/your-repo.git
# List Configured Repositories
argocd repo list
# Remove Repository Credentials
argocd repo rm https://github.com/yourusername/your-repo.git
`,
}
command.AddCommand(NewRepoAddCommand(clientOpts))

View File

@@ -17,7 +17,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/git"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/templates"
)
// NewRepoCredsCommand returns a new instance of an `argocd repocreds` command
@@ -25,16 +24,6 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
var command = &cobra.Command{
Use: "repocreds",
Short: "Manage repository connection parameters",
Example: templates.Examples(`
# Add credentials with user/pass authentication to use for all repositories under the specified URL
argocd repocreds add URL --username USERNAME --password PASSWORD
# List all the configured repository credentials
argocd repocreds list
# Remove credentials for the repositories with the specified URL
argocd repocreds rm URL
`),
Run: func(c *cobra.Command, args []string) {
c.HelpFunc()(c, args)
os.Exit(1)
@@ -195,10 +184,6 @@ func NewRepoCredsRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
var command = &cobra.Command{
Use: "rm CREDSURL",
Short: "Remove repository credentials",
Example: templates.Examples(`
# Remove credentials for the repositories with URL https://git.example.com/repos
argocd repocreds rm https://git.example.com/repos/
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -246,19 +231,6 @@ func NewRepoCredsListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
var command = &cobra.Command{
Use: "list",
Short: "List configured repository credentials",
Example: templates.Examples(`
# List all repo urls
argocd repocreds list
# List all repo urls in json format
argocd repocreds list -o json
# List all repo urls in yaml format
argocd repocreds list -o yaml
# List all repo urls in url format
argocd repocreds list -o url
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -295,7 +295,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
Backoff: &argoappv1.Backoff{
Duration: appOpts.retryBackoffDuration.String(),
MaxDuration: appOpts.retryBackoffMaxDuration.String(),
Factor: pointer.Int64(appOpts.retryBackoffFactor),
Factor: pointer.Int64Ptr(appOpts.retryBackoffFactor),
},
}
} else if appOpts.retryLimit == 0 {

View File

@@ -115,7 +115,7 @@ func GetOrphanedResourcesSettings(flagSet *pflag.FlagSet, opts ProjectOpts) *v1a
if opts.orphanedResourcesEnabled || warnChanged {
settings := v1alpha1.OrphanedResourcesMonitorSettings{}
if warnChanged {
settings.Warn = pointer.Bool(opts.orphanedResourcesWarn)
settings.Warn = pointer.BoolPtr(opts.orphanedResourcesWarn)
}
return &settings
}

View File

@@ -65,7 +65,7 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize),
grpc.KeepaliveEnforcementPolicy(
keepalive.EnforcementPolicy{
MinTime: common.GetGRPCKeepAliveEnforcementMinimum(),
MinTime: common.GRPCKeepAliveEnforcementMinimum,
},
),
}
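
The CMP server's gRPC options pair a message-size cap with a keepalive enforcement policy, where MinTime is the shortest client ping interval the server will tolerate. A minimal sketch with the 10s minimum inlined (the 100 MiB cap only stands in for apiclient.MaxGRPCMessageSize):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Reject client keepalive pings arriving more often than every 10s.
	srv := grpc.NewServer(
		grpc.MaxSendMsgSize(100*1024*1024),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime: 10 * time.Second,
		}),
	)
	defer srv.Stop()
}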

View File

@@ -258,8 +258,6 @@ const (
EnvRedisName = "ARGOCD_REDIS_NAME"
// EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key.
EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME"
// EnvGRPCKeepAliveMin defines the GRPCKeepAliveEnforcementMinimum, used in the grpc.KeepaliveEnforcementPolicy. Expects a "Duration" format (e.g. 10s).
EnvGRPCKeepAliveMin = "ARGOCD_GRPC_KEEP_ALIVE_MIN"
)
// Config Management Plugin related constants
@@ -353,25 +351,10 @@ const (
// gRPC settings
const (
defaultGRPCKeepAliveEnforcementMinimum = 10 * time.Second
)
func GetGRPCKeepAliveEnforcementMinimum() time.Duration {
if GRPCKeepAliveMinStr := os.Getenv(EnvGRPCKeepAliveMin); GRPCKeepAliveMinStr != "" {
GRPCKeepAliveMin, err := time.ParseDuration(GRPCKeepAliveMinStr)
if err != nil {
logrus.Warnf("invalid env var value for %s: cannot parse: %s. Default value %s will be used.", EnvGRPCKeepAliveMin, err, defaultGRPCKeepAliveEnforcementMinimum)
return defaultGRPCKeepAliveEnforcementMinimum
}
return GRPCKeepAliveMin
}
return defaultGRPCKeepAliveEnforcementMinimum
}
func GetGRPCKeepAliveTime() time.Duration {
GRPCKeepAliveEnforcementMinimum = 10 * time.Second
// GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors
return 2 * GetGRPCKeepAliveEnforcementMinimum()
}
GRPCKeepAliveTime = 2 * GRPCKeepAliveEnforcementMinimum
)
// Security severity logging
const (

View File
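The helper deleted above is the usual duration-from-env pattern: parse the variable with time.ParseDuration and fall back to a default when it is unset or unparsable. A self-contained sketch of the same behavior (names shortened for illustration):

package main

import (
	"fmt"
	"os"
	"time"
)

const defaultKeepAliveMin = 10 * time.Second

// keepAliveMin reads ARGOCD_GRPC_KEEP_ALIVE_MIN as a Go duration
// ("10s", "1m30s", ...) and falls back to the default when the
// variable is unset or cannot be parsed.
func keepAliveMin() time.Duration {
	if s := os.Getenv("ARGOCD_GRPC_KEEP_ALIVE_MIN"); s != "" {
		if d, err := time.ParseDuration(s); err == nil {
			return d
		}
	}
	return defaultKeepAliveMin
}

func main() {
	os.Setenv("ARGOCD_GRPC_KEEP_ALIVE_MIN", "15s")
	fmt.Println(keepAliveMin())     // 15s
	fmt.Println(2 * keepAliveMin()) // keepalive time stays 2x the minimum
}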

@@ -1,46 +0,0 @@
package common
import (
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Test env var not set for EnvGRPCKeepAliveMin
func Test_GRPCKeepAliveMinNotSet(t *testing.T) {
grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
grpcKeepAliveExpectedMin := defaultGRPCKeepAliveEnforcementMinimum
assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)
grpcKeepAliveTime := GetGRPCKeepAliveTime()
assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
}
// Test valid env var set for EnvGRPCKeepAliveMin
func Test_GRPCKeepAliveMinIsSet(t *testing.T) {
numSeconds := 15
os.Setenv(EnvGRPCKeepAliveMin, fmt.Sprintf("%ds", numSeconds))
grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
grpcKeepAliveExpectedMin := time.Duration(numSeconds) * time.Second
assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)
grpcKeepAliveTime := GetGRPCKeepAliveTime()
assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
}
// Test invalid env var set for EnvGRPCKeepAliveMin
func Test_GRPCKeepAliveMinIncorrectlySet(t *testing.T) {
numSeconds := 15
os.Setenv(EnvGRPCKeepAliveMin, fmt.Sprintf("%d", numSeconds))
grpcKeepAliveMin := GetGRPCKeepAliveEnforcementMinimum()
grpcKeepAliveExpectedMin := defaultGRPCKeepAliveEnforcementMinimum
assert.Equal(t, grpcKeepAliveExpectedMin, grpcKeepAliveMin)
grpcKeepAliveTime := GetGRPCKeepAliveTime()
assert.Equal(t, 2*grpcKeepAliveExpectedMin, grpcKeepAliveTime)
}

View File

@@ -15,7 +15,6 @@ import (
"sync"
"time"
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/health"
@@ -57,8 +56,6 @@ import (
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
"github.com/argoproj/argo-cd/v2/util/env"
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
@@ -66,6 +63,7 @@ import (
"github.com/argoproj/argo-cd/v2/util/helm"
logutils "github.com/argoproj/argo-cd/v2/util/log"
settings_util "github.com/argoproj/argo-cd/v2/util/settings"
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
)
const (
@@ -143,7 +141,6 @@ func NewApplicationController(
appResyncPeriod time.Duration,
appHardResyncPeriod time.Duration,
selfHealTimeout time.Duration,
repoErrorGracePeriod time.Duration,
metricsPort int,
metricsCacheExpiration time.Duration,
metricsApplicationLabels []string,
@@ -151,14 +148,9 @@ func NewApplicationController(
persistResourceHealth bool,
clusterFilter func(cluster *appv1.Cluster) bool,
applicationNamespaces []string,
rateLimiterConfig *ratelimiter.AppControllerRateLimiterConfig,
) (*ApplicationController, error) {
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
if rateLimiterConfig == nil {
rateLimiterConfig = ratelimiter.GetDefaultAppRateLimiterConfig()
log.Info("Using default workqueue rate limiter config")
}
ctrl := ApplicationController{
cache: argoCache,
namespace: namespace,
@@ -166,10 +158,10 @@ func NewApplicationController(
kubectl: kubectl,
applicationClientset: applicationClientset,
repoClientset: repoClientset,
appRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_reconciliation_queue"),
appOperationQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "app_operation_processing_queue"),
projectRefreshQueue: workqueue.NewNamedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), "project_reconciliation_queue"),
appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
appRefreshQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_reconciliation_queue"),
appOperationQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_operation_processing_queue"),
projectRefreshQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "project_reconciliation_queue"),
appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
db: db,
statusRefreshTimeout: appResyncPeriod,
statusHardRefreshTimeout: appHardResyncPeriod,
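Both sides of this hunk feed client-go's rate-limiting workqueue; only the limiter construction differs. The default variant in isolation (a minimal sketch; the queue name and item key are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// DefaultControllerRateLimiter combines per-item exponential backoff
	// (5ms up to 1000s) with an overall token bucket (10 qps, burst 100).
	q := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "app_reconciliation_queue")
	q.AddRateLimited("argocd/my-app") // enqueue, subject to current backoff
	key, shutdown := q.Get()          // blocks until an item is ready
	if !shutdown {
		fmt.Println("processing", key)
		q.Forget(key) // reset the per-item backoff after success
		q.Done(key)   // mark the item as processed
	}
}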
@@ -189,11 +181,10 @@ func NewApplicationController(
appInformer, appLister := ctrl.newApplicationInformerAndLister()
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, indexers)
var err error
_, err = projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
ctrl.projectRefreshQueue.AddRateLimited(key)
ctrl.projectRefreshQueue.Add(key)
if projMeta, ok := obj.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}
@@ -202,7 +193,7 @@ func NewApplicationController(
},
UpdateFunc: func(old, new interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(new); err == nil {
ctrl.projectRefreshQueue.AddRateLimited(key)
ctrl.projectRefreshQueue.Add(key)
if projMeta, ok := new.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
}
@@ -210,7 +201,6 @@ func NewApplicationController(
},
DeleteFunc: func(obj interface{}) {
if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
// immediately push to queue for deletes
ctrl.projectRefreshQueue.Add(key)
if projMeta, ok := obj.(metav1.Object); ok {
ctrl.InvalidateProjectsCache(projMeta.GetName())
@@ -218,9 +208,6 @@ func NewApplicationController(
}
},
})
if err != nil {
return nil, err
}
factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
deploymentInformer := factory.Apps().V1().Deployments()
@@ -248,7 +235,7 @@ func NewApplicationController(
}
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
var err error
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
if err != nil {
return nil, err
@@ -260,7 +247,7 @@ func NewApplicationController(
}
}
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking())
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod)
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth)
ctrl.appInformer = appInformer
ctrl.appLister = appLister
ctrl.projInformer = projInformer
@@ -824,8 +811,8 @@ func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith
ctrl.appRefreshQueue.AddAfter(key, *after)
ctrl.appOperationQueue.AddAfter(key, *after)
} else {
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appOperationQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
ctrl.appOperationQueue.Add(key)
}
}
}
@@ -1332,7 +1319,8 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
}
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
_, err := ctrl.PatchAppWithWriteBack(context.Background(), app.Name, app.Namespace, types.MergePatchType, patchJSON, metav1.PatchOptions{})
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
if err != nil {
// Stop retrying updating deleted application
if apierr.IsNotFound(err) {
@@ -1370,27 +1358,6 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
}
}
// writeBackToInformer writes a freshly updated App back into the informer cache.
// This prevents the controller from operating on a stale app and repeating work.
func (ctrl *ApplicationController) writeBackToInformer(app *appv1.Application) {
logCtx := log.WithFields(log.Fields{"application": app.Name, "appNamespace": app.Namespace, "project": app.Spec.Project, "informer-writeBack": true})
err := ctrl.appInformer.GetStore().Update(app)
if err != nil {
logCtx.Errorf("failed to update informer store: %v", err)
return
}
}
// PatchAppWithWriteBack patches an application and writes it back to the informer cache
func (ctrl *ApplicationController) PatchAppWithWriteBack(ctx context.Context, name, ns string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appv1.Application, err error) {
patchedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ns).Patch(ctx, name, pt, data, opts, subresources...)
if err != nil {
return patchedApp, err
}
ctrl.writeBackToInformer(patchedApp)
return patchedApp, err
}
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
patchMs := time.Duration(0) // time spent in doing patch/update calls
setOpMs := time.Duration(0) // time spent in doing Operation patch calls in autosync
@@ -1427,22 +1394,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
return
}
app := origApp.DeepCopy()
logCtx := log.WithFields(log.Fields{
"application": app.QualifiedName(),
"level": comparisonLevel,
"dest-server": origApp.Spec.Destination.Server,
"dest-name": origApp.Spec.Destination.Name,
"dest-namespace": origApp.Spec.Destination.Namespace,
})
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
startTime := time.Now()
defer func() {
reconcileDuration := time.Since(startTime)
ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
logCtx.WithFields(log.Fields{
"time_ms": reconcileDuration.Milliseconds(),
"patch_ms": patchMs.Milliseconds(),
"setop_ms": setOpMs.Milliseconds(),
"time_ms": reconcileDuration.Milliseconds(),
"patch_ms": patchMs.Milliseconds(),
"setop_ms": setOpMs.Milliseconds(),
"level": comparisonLevel,
"dest-server": origApp.Spec.Destination.Server,
"dest-name": origApp.Spec.Destination.Name,
"dest-namespace": origApp.Spec.Destination.Namespace,
}).Info("Reconciliation completed")
}()
@@ -1513,15 +1478,10 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
}
now := metav1.Now()
compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
compareResult := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
refreshType == appv1.RefreshTypeHard,
comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)
if goerrors.Is(err, CompareStateRepoError) {
logCtx.Warnf("Ignoring temporary failed attempt to compare app state against repo: %v", err)
return // short circuit if git error is encountered
}
for k, v := range compareResult.timings {
logCtx = logCtx.WithField(k, v.Milliseconds())
}
@@ -1607,7 +1567,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
} else if hardExpired || softExpired {
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
// TODO: find existing Golang bug or create a new one
//TODO: find existing Golang bug or create a new one
reconciledAtStr := "never"
if app.Status.ReconciledAt != nil {
reconciledAtStr = app.Status.ReconciledAt.String()
@@ -1669,7 +1629,8 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica
if err != nil {
logCtx.Errorf("error constructing app spec patch: %v", err)
} else if modified {
_, err := ctrl.PatchAppWithWriteBack(context.Background(), app.Name, app.Namespace, types.MergePatchType, patch, metav1.PatchOptions{})
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
logCtx.Errorf("Error persisting normalized application spec: %v", err)
} else {
@@ -1713,7 +1674,8 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
defer func() {
patchMs = time.Since(start)
}()
_, err = ctrl.PatchAppWithWriteBack(context.Background(), orig.Name, orig.Namespace, types.MergePatchType, patch, metav1.PatchOptions{})
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
logCtx.Warnf("Error updating application: %v", err)
} else {
@@ -1823,7 +1785,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
start := time.Now()
updatedApp, err := argo.SetAppOperation(appIf, app.Name, &op)
_, err := argo.SetAppOperation(appIf, app.Name, &op)
setOpTime := time.Since(start)
if err != nil {
if goerrors.Is(err, argo.ErrAnotherOperationInProgress) {
@@ -1835,8 +1797,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
} else {
ctrl.writeBackToInformer(updatedApp)
}
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "")
@@ -2024,7 +1984,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
},
)
lister := applisters.NewApplicationLister(informer.GetIndexer())
_, err := informer.AddEventHandler(
informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if !ctrl.canProcessApp(obj) {
@@ -2032,8 +1992,8 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
}
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appOperationQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
ctrl.appOperationQueue.Add(key)
}
},
UpdateFunc: func(old, new interface{}) {
@@ -2053,7 +2013,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
compareWith = CompareWithLatest.Pointer()
}
ctrl.requestAppRefresh(newApp.QualifiedName(), compareWith, nil)
ctrl.appOperationQueue.AddRateLimited(key)
ctrl.appOperationQueue.Add(key)
},
DeleteFunc: func(obj interface{}) {
if !ctrl.canProcessApp(obj) {
@@ -2063,15 +2023,11 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
// key function.
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
// for deletes, we immediately add to the refresh queue
ctrl.appRefreshQueue.Add(key)
}
},
},
)
if err != nil {
return nil, nil
}
return informer, lister
}

View File
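The deleted write-back pattern (patch through the API client, then push the result into the informer store so the next worker sees fresh state) works because an informer store is an ordinary keyed cache. A minimal sketch with a toy type (the app struct and key function are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type app struct{ Name, Phase string }

// keyFunc satisfies cache.KeyFunc for the toy type.
func keyFunc(obj interface{}) (string, error) { return obj.(*app).Name, nil }

func main() {
	store := cache.NewStore(keyFunc)
	_ = store.Add(&app{Name: "my-app", Phase: "OutOfSync"})

	// After a successful patch, write the fresh object back so readers
	// of the cache do not act on the stale copy and repeat work.
	_ = store.Update(&app{Name: "my-app", Phase: "Synced"})

	got, _, _ := store.GetByKey("my-app")
	fmt.Println(got.(*app).Phase) // Synced
}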

@@ -59,7 +59,7 @@ type fakeData struct {
applicationNamespaces []string
}
func newFakeController(data *fakeData, repoErr error) *ApplicationController {
func newFakeController(data *fakeData) *ApplicationController {
var clust corev1.Secret
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
if err != nil {
@@ -71,18 +71,10 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
if len(data.manifestResponses) > 0 {
for _, response := range data.manifestResponses {
if repoErr != nil {
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, repoErr).Once()
} else {
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, nil).Once()
}
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(response, nil).Once()
}
} else {
if repoErr != nil {
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, repoErr).Once()
} else {
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil).Once()
}
mockRepoClient.On("GenerateManifest", mock.Anything, mock.Anything).Return(data.manifestResponse, nil)
}
mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}
@@ -124,7 +116,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
time.Minute,
time.Hour,
time.Minute,
time.Second*10,
common.DefaultPortArgoCDMetrics,
data.metricsCacheExpiration,
[]string{},
@@ -132,7 +123,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
true,
nil,
data.applicationNamespaces,
nil,
)
if err != nil {
panic(err)
@@ -373,7 +363,7 @@ func newFakeCM() map[string]interface{} {
func TestAutoSync(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -390,7 +380,7 @@ func TestAutoSync(t *testing.T) {
func TestAutoSyncNotAllowEmpty(t *testing.T) {
app := newFakeApp()
app.Spec.SyncPolicy.Automated.Prune = true
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -403,7 +393,7 @@ func TestAutoSyncAllowEmpty(t *testing.T) {
app := newFakeApp()
app.Spec.SyncPolicy.Automated.Prune = true
app.Spec.SyncPolicy.Automated.AllowEmpty = true
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -417,7 +407,7 @@ func TestSkipAutoSync(t *testing.T) {
// Set current to 'aaaaa', desired to 'aaaa' and mark system OutOfSync
t.Run("PreviouslySyncedToRevision", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
@@ -432,7 +422,7 @@ func TestSkipAutoSync(t *testing.T) {
// Verify we skip when we are already Synced (even if revision is different)
t.Run("AlreadyInSyncedState", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -448,7 +438,7 @@ func TestSkipAutoSync(t *testing.T) {
t.Run("AutoSyncIsDisabled", func(t *testing.T) {
app := newFakeApp()
app.Spec.SyncPolicy = nil
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -465,7 +455,7 @@ func TestSkipAutoSync(t *testing.T) {
app := newFakeApp()
now := metav1.Now()
app.DeletionTimestamp = &now
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -491,7 +481,7 @@ func TestSkipAutoSync(t *testing.T) {
Source: *app.Spec.Source.DeepCopy(),
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -505,7 +495,7 @@ func TestSkipAutoSync(t *testing.T) {
t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
@@ -531,7 +521,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
@@ -566,7 +556,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
@@ -623,7 +613,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}}, nil)
}})
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
@@ -634,7 +624,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
_, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
@@ -673,7 +663,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
kube.GetResourceKey(appObj): appObj,
kube.GetResourceKey(strayObj): strayObj,
},
}, nil)
})
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
@@ -684,7 +674,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
objs, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
@@ -708,7 +698,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}}, nil)
}})
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -718,7 +708,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
_, err := ctrl.finalizeApplicationDeletion(app, func(project string) ([]*v1alpha1.Cluster, error) {
return []*v1alpha1.Cluster{}, nil
@@ -737,7 +727,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}}, nil)
}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -801,9 +791,9 @@ func TestNormalizeApplication(t *testing.T) {
{
// Verify we normalize the app because project is missing
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
@@ -813,7 +803,7 @@ func TestNormalizeApplication(t *testing.T) {
normalized = true
}
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.processAppRefreshQueueItem()
assert.True(t, normalized)
@@ -823,9 +813,9 @@ func TestNormalizeApplication(t *testing.T) {
// Verify we don't unnecessarily normalize app when project is set
app.Spec.Project = "default"
data.apps[0] = app
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
normalized := false
@@ -835,7 +825,7 @@ func TestNormalizeApplication(t *testing.T) {
normalized = true
}
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.processAppRefreshQueueItem()
assert.False(t, normalized)
@@ -848,7 +838,7 @@ func TestHandleAppUpdated(t *testing.T) {
app.Spec.Destination.Server = v1alpha1.KubernetesInternalAPIServerAddr
proj := defaultProj.DeepCopy()
proj.Spec.SourceNamespaces = []string{test.FakeArgoCDNamespace}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}})
ctrl.handleObjectUpdated(map[string]bool{app.InstanceName(ctrl.namespace): true}, kube.GetObjectRef(kube.MustToUnstructured(app)))
isRequested, level := ctrl.isRefreshRequested(app.QualifiedName())
@@ -875,7 +865,7 @@ func TestHandleOrphanedResourceUpdated(t *testing.T) {
proj := defaultProj.DeepCopy()
proj.Spec.OrphanedResources = &v1alpha1.OrphanedResourcesMonitorSettings{}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}})
ctrl.handleObjectUpdated(map[string]bool{}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: test.FakeArgoCDNamespace})
@@ -910,7 +900,7 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
kube.NewResourceKey("apps", "Deployment", "default", "deploy1"): {ResourceNode: orphanedDeploy1},
kube.NewResourceKey("apps", "Deployment", "default", "deploy2"): {ResourceNode: orphanedDeploy2},
},
}, nil)
})
tree, err := ctrl.getResourceTree(app, []*v1alpha1.ResourceDiff{{
Namespace: "default",
Name: "nginx-deployment",
@@ -926,13 +916,13 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
}
func TestSetOperationStateOnDeletedApp(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
patched := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, &v1alpha1.Application{}, apierr.NewNotFound(schema.GroupResource{}, "my-app")
return true, nil, apierr.NewNotFound(schema.GroupResource{}, "my-app")
})
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
assert.True(t, patched)
@@ -957,16 +947,16 @@ func TestSetOperationStateLogRetries(t *testing.T) {
t.Cleanup(func() {
logrus.StandardLogger().ReplaceHooks(logrus.LevelHooks{})
})
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
patched := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if !patched {
patched = true
return true, &v1alpha1.Application{}, errors.New("fake error")
return true, nil, errors.New("fake error")
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
assert.True(t, patched)
@@ -1008,7 +998,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
t.Run("no need to refresh just reconciled application", func(t *testing.T) {
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
@@ -1020,7 +1010,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
assert.False(t, needRefresh)
// use a one-off controller so other tests don't have a manual refresh request
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
// refresh app using the 'deepest' requested comparison level
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
@@ -1048,7 +1038,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
app := app.DeepCopy()
// use a one-off controller so other tests don't have a manual refresh request
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)
@@ -1078,7 +1068,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
}
// use a one-off controller so other tests don't have a manual refresh request
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)
@@ -1158,7 +1148,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
}
func TestUpdatedManagedNamespaceMetadata(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
app := newFakeApp()
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{
Labels: map[string]string{
@@ -1182,7 +1172,7 @@ func TestUpdatedManagedNamespaceMetadata(t *testing.T) {
}
func TestUnchangedManagedNamespaceMetadata(t *testing.T) {
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
app := newFakeApp()
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &v1alpha1.ManagedNamespaceMetadata{
Labels: map[string]string{
@@ -1225,7 +1215,7 @@ func TestRefreshAppConditions(t *testing.T) {
t.Run("NoErrorConditions", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
_, hasErrors := ctrl.refreshAppConditions(app)
assert.False(t, hasErrors)
@@ -1236,7 +1226,7 @@ func TestRefreshAppConditions(t *testing.T) {
app := newFakeApp()
app.Status.SetConditions([]v1alpha1.ApplicationCondition{{Type: v1alpha1.ApplicationConditionExcludedResourceWarning}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
_, hasErrors := ctrl.refreshAppConditions(app)
assert.False(t, hasErrors)
@@ -1249,7 +1239,7 @@ func TestRefreshAppConditions(t *testing.T) {
app.Spec.Project = "wrong project"
app.Status.SetConditions([]v1alpha1.ApplicationCondition{{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: "old message"}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}})
_, hasErrors := ctrl.refreshAppConditions(app)
assert.True(t, hasErrors)
@@ -1273,7 +1263,7 @@ func TestUpdateReconciledAt(t *testing.T) {
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}, nil)
})
key, _ := cache.MetaNamespaceKeyFunc(app)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.ReactionChain = nil
@@ -1282,13 +1272,13 @@ func TestUpdateReconciledAt(t *testing.T) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
t.Run("UpdatedOnFullReconciliation", func(t *testing.T) {
receivedPatch = map[string]interface{}{}
ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), nil)
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
ctrl.processAppRefreshQueueItem()
@@ -1303,7 +1293,7 @@ func TestUpdateReconciledAt(t *testing.T) {
t.Run("NotUpdatedOnPartialReconciliation", func(t *testing.T) {
receivedPatch = map[string]interface{}{}
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
ctrl.processAppRefreshQueueItem()
@@ -1331,9 +1321,9 @@ func TestProjectErrorToCondition(t *testing.T) {
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}, nil)
})
key, _ := cache.MetaNamespaceKeyFunc(app)
ctrl.appRefreshQueue.AddRateLimited(key)
ctrl.appRefreshQueue.Add(key)
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
ctrl.processAppRefreshQueueItem()
@@ -1350,13 +1340,13 @@ func TestProjectErrorToCondition(t *testing.T) {
func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
app := newFakeApp()
proj := &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, proj}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
patched := false
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patched = true
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
err := ctrl.finalizeProjectDeletion(proj)
@@ -1366,7 +1356,7 @@ func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
proj := &v1alpha1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{&defaultProj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{&defaultProj}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
@@ -1374,7 +1364,7 @@ func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.AppProject{}, nil
return true, nil, nil
})
err := ctrl.finalizeProjectDeletion(proj)
@@ -1392,14 +1382,14 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1417,7 +1407,7 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
proj := defaultProj
proj.Name = "test-project"
proj.Spec.SourceNamespaces = []string{test.FakeArgoCDNamespace}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &proj}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &proj}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
func() {
@@ -1427,7 +1417,7 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
}()
@@ -1446,14 +1436,14 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{Limit: 1},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1489,14 +1479,14 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
Revision: "abc123",
},
}
ctrl := newFakeController(data, nil)
ctrl := newFakeController(data)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1522,14 +1512,14 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
Revision: "abc123",
},
}
ctrl := newFakeController(data, nil)
ctrl := newFakeController(data)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]interface{}{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
assert.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
return true, nil, nil
})
ctrl.processRequestedAppOperation(app)
@@ -1549,7 +1539,7 @@ func TestGetAppHosts(t *testing.T) {
Revision: "abc123",
},
}
ctrl := newFakeController(data, nil)
ctrl := newFakeController(data)
mockStateCache := &mockstatecache.LiveStateCache{}
mockStateCache.On("IterateResources", mock.Anything, mock.MatchedBy(func(callback func(res *clustercache.Resource, info *statecache.ResourceInfo)) bool {
// node resource
@@ -1599,15 +1589,15 @@ func TestGetAppHosts(t *testing.T) {
func TestMetricsExpiration(t *testing.T) {
app := newFakeApp()
// Check expiration is disabled by default
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
assert.False(t, ctrl.metricsServer.HasExpiration())
// Check expiration is enabled if set
ctrl = newFakeController(&fakeData{apps: []runtime.Object{app}, metricsCacheExpiration: 10 * time.Second}, nil)
ctrl = newFakeController(&fakeData{apps: []runtime.Object{app}, metricsCacheExpiration: 10 * time.Second})
assert.True(t, ctrl.metricsServer.HasExpiration())
}
func TestToAppKey(t *testing.T) {
ctrl := newFakeController(&fakeData{}, nil)
ctrl := newFakeController(&fakeData{})
tests := []struct {
name string
input string
@@ -1627,7 +1617,7 @@ func TestToAppKey(t *testing.T) {
func Test_canProcessApp(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
ctrl.applicationNamespaces = []string{"good"}
t.Run("without cluster filter, good namespace", func(t *testing.T) {
app.Namespace = "good"
@@ -1660,7 +1650,7 @@ func Test_canProcessAppSkipReconcileAnnotation(t *testing.T) {
appSkipReconcileFalse.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "false"}
appSkipReconcileTrue := newFakeApp()
appSkipReconcileTrue.Annotations = map[string]string{common.AnnotationKeyAppSkipReconcile: "true"}
ctrl := newFakeController(&fakeData{}, nil)
ctrl := newFakeController(&fakeData{})
tests := []struct {
name string
input interface{}
@@ -1681,7 +1671,7 @@ func Test_canProcessAppSkipReconcileAnnotation(t *testing.T) {
func Test_syncDeleteOption(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
cm := newFakeCM()
t.Run("without delete option object is deleted", func(t *testing.T) {
cmObj := kube.MustToUnstructured(&cm)
@@ -1708,7 +1698,7 @@ func TestAddControllerNamespace(t *testing.T) {
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{},
}, nil)
})
ctrl.processAppRefreshQueueItem()
@@ -1727,7 +1717,7 @@ func TestAddControllerNamespace(t *testing.T) {
apps: []runtime.Object{app, &proj},
manifestResponse: &apiclient.ManifestResponse{},
applicationNamespaces: []string{appNamespace},
}, nil)
})
ctrl.processAppRefreshQueueItem()

View File
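The reactor idiom used throughout these tests generalizes to any client-go fake clientset. A compact sketch against the plain Kubernetes fake (the resource and patch payload are illustrative):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	kubetesting "k8s.io/client-go/testing"
)

func main() {
	cs := fake.NewSimpleClientset()
	var received []byte
	// PrependReactor runs before the default object-tracker reactor, so
	// the test can capture the patch payload and control the response.
	cs.PrependReactor("patch", "*", func(action kubetesting.Action) (bool, runtime.Object, error) {
		if pa, ok := action.(kubetesting.PatchAction); ok {
			received = pa.GetPatch()
		}
		// Returning a non-nil typed object is safer when callers read
		// the result (cf. the nil vs &v1alpha1.Application{} hunks above).
		return true, &corev1.ConfigMap{}, nil
	})
	_, err := cs.CoreV1().ConfigMaps("default").Patch(context.Background(),
		"cm", types.MergePatchType, []byte(`{"data":{"a":"b"}}`), metav1.PatchOptions{})
	fmt.Println(string(received), err) // {"data":{"a":"b"}} <nil>
}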

@@ -120,7 +120,7 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
}
fakeClient := fake.NewSimpleClientset()
settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
liveStateCacheLock := sync.RWMutex{}
externalLockRef := sync.RWMutex{}
gitopsEngineClusterCache := &mocks.ClusterCache{}
clustersCache := liveStateCache{
clusters: map[string]cache.ClusterCache{
@@ -132,14 +132,11 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
settingsMgr: settingsMgr,
// Set the lock here so we can reference it later
// nolint We need to overwrite here to have access to the lock
lock: liveStateCacheLock,
lock: externalLockRef,
}
channel := make(chan string)
// Mocked lock held by the gitops-engine cluster cache
gitopsEngineClusterCacheLock := sync.Mutex{}
// Ensure completion of both EnsureSynced and Invalidate
ensureSyncedCompleted := sync.Mutex{}
invalidateCompleted := sync.Mutex{}
mockMutex := sync.RWMutex{}
// Locks to force trigger condition during test
// Condition order:
// EnsuredSynced -> Locks gitops-engine
@@ -147,39 +144,40 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
// EnsureSynced via sync, newResource, populateResourceInfoHandler -> attempts to Lock liveStateCache
// handleDeleteEvent via cluster.Invalidate -> attempts to Lock gitops-engine
handleDeleteWasCalled := sync.Mutex{}
engineHoldsEngineLock := sync.Mutex{}
ensureSyncedCompleted.Lock()
invalidateCompleted.Lock()
engineHoldsLock := sync.Mutex{}
handleDeleteWasCalled.Lock()
engineHoldsEngineLock.Lock()
engineHoldsLock.Lock()
gitopsEngineClusterCache.On("EnsureSynced").Run(func(args mock.Arguments) {
gitopsEngineClusterCacheLock.Lock()
t.Log("EnsureSynced: Engine has engine lock")
engineHoldsEngineLock.Unlock()
defer gitopsEngineClusterCacheLock.Unlock()
// Wait until handleDeleteEvent holds the liveStateCache lock
// Held by EnsureSynced calling into sync and watchEvents
mockMutex.Lock()
defer mockMutex.Unlock()
// Continue Execution of timer func
engineHoldsLock.Unlock()
// Wait for handleDeleteEvent to be called triggering the lock
// on the liveStateCache
handleDeleteWasCalled.Lock()
// Try and obtain the liveStateCache lock
clustersCache.lock.Lock()
t.Log("EnsureSynced: Engine has LiveStateCache lock")
clustersCache.lock.Unlock()
ensureSyncedCompleted.Unlock()
}).Return(nil).Once()
gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) {
// Allow EnsureSynced to continue now that we're in the deadlock condition
t.Logf("handleDelete was called, EnsureSynced continuing...")
handleDeleteWasCalled.Unlock()
// Wait until gitops engine holds the gitops lock
// This prevents timing issues if we reach this point before EnsureSynced has obtained the lock
engineHoldsEngineLock.Lock()
t.Log("Invalidate: Engine has engine lock")
engineHoldsEngineLock.Unlock()
// Lock engine lock
gitopsEngineClusterCacheLock.Lock()
t.Log("Invalidate: Invalidate has engine lock")
gitopsEngineClusterCacheLock.Unlock()
invalidateCompleted.Unlock()
// Try and obtain the lock on the liveStateCache
alreadyFailed := !externalLockRef.TryLock()
if alreadyFailed {
channel <- "DEADLOCKED -- EnsureSynced could not obtain lock on liveStateCache"
return
}
externalLockRef.Lock()
t.Logf("EnsureSynce was able to lock liveStateCache")
externalLockRef.Unlock()
}).Return(nil).Once()
gitopsEngineClusterCache.On("Invalidate").Run(func(args mock.Arguments) {
// If the deadlock is fixed, we should be able to acquire the lock here
alreadyFailed := !mockMutex.TryLock()
if alreadyFailed {
channel <- "DEADLOCKED -- Invalidate could not obtain lock on gitops-engine"
return
}
mockMutex.Lock()
t.Logf("Invalidate was able to lock gitops-engine cache")
mockMutex.Unlock()
}).Return()
go func() {
// Start the gitops-engine lock holds
@@ -189,14 +187,14 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
assert.Fail(t, err.Error())
}
}()
// Wait for EnsureSynced to grab the lock for gitops-engine
engineHoldsLock.Lock()
t.Log("EnsureSynced has obtained lock on gitops-engine")
engineHoldsLock.Unlock()
// Run in background
go clustersCache.handleDeleteEvent(testCluster.Server)
// Allow execution to continue on clusters cache call to trigger lock
ensureSyncedCompleted.Lock()
invalidateCompleted.Lock()
t.Log("Competing functions were able to obtain locks")
invalidateCompleted.Unlock()
ensureSyncedCompleted.Unlock()
handleDeleteWasCalled.Unlock()
channel <- "PASSED"
}()
select {

View File
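One side of this test relies on TryLock (Go 1.18+), which turns a would-be hang into a detectable failure; the probe pattern in isolation:

package main

import (
	"fmt"
	"sync"
)

// probe reports whether mu could be acquired without blocking. TryLock
// never waits, so a test can fail fast with a diagnostic instead of
// hanging until the suite times out.
func probe(mu *sync.Mutex) bool {
	if !mu.TryLock() {
		return false // held elsewhere: potential deadlock
	}
	mu.Unlock()
	return true
}

func main() {
	var mu sync.Mutex
	fmt.Println(probe(&mu)) // true: lock was free
	mu.Lock()
	fmt.Println(probe(&mu)) // false: lock held by main
}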

@@ -37,16 +37,6 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
}
}
}
for k, v := range un.GetAnnotations() {
if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) {
if res.NetworkingInfo == nil {
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{}
}
res.NetworkingInfo.ExternalURLs = append(res.NetworkingInfo.ExternalURLs, v)
}
}
switch gvk.Group {
case "":
switch gvk.Kind {
@@ -68,6 +58,15 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
populateIstioVirtualServiceInfo(un, res)
}
}
for k, v := range un.GetAnnotations() {
if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) {
if res.NetworkingInfo == nil {
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{}
}
res.NetworkingInfo.ExternalURLs = append(res.NetworkingInfo.ExternalURLs, v)
}
}
}
func getIngress(un *unstructured.Unstructured) []v1.LoadBalancerIngress {
@@ -94,13 +93,7 @@ func populateServiceInfo(un *unstructured.Unstructured, res *ResourceInfo) {
if serviceType, ok, err := unstructured.NestedString(un.Object, "spec", "type"); ok && err == nil && serviceType == string(v1.ServiceTypeLoadBalancer) {
ingress = getIngress(un)
}
var urls []string
if res.NetworkingInfo != nil {
urls = res.NetworkingInfo.ExternalURLs
}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress, ExternalURLs: urls}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetLabels: targetLabels, Ingress: ingress}
}
func getServiceName(backend map[string]interface{}, gvk schema.GroupVersionKind) (string, error) {
@@ -270,12 +263,7 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
targets = append(targets, target)
}
var urls []string
if res.NetworkingInfo != nil {
urls = res.NetworkingInfo.ExternalURLs
}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets}
}
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
@@ -386,13 +374,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
if restarts > 0 {
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Restart Count", Value: fmt.Sprintf("%d", restarts)})
}
var urls []string
if res.NetworkingInfo != nil {
urls = res.NetworkingInfo.ExternalURLs
}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels(), ExternalURLs: urls}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{Labels: un.GetLabels()}
}
func populateHostNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {

View File

@@ -406,7 +406,7 @@ func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
Kind: kube.ServiceKind,
Name: "helm-guestbook",
}},
ExternalURLs: []string{"http://my-grafana.com/ingress-link", "https://helm-guestbook.com/"},
ExternalURLs: []string{"https://helm-guestbook.com/", "http://my-grafana.com/ingress-link"},
}, info.NetworkingInfo)
}

View File

@@ -3,15 +3,12 @@ package controller
import (
"context"
"encoding/json"
"errors"
"fmt"
v1 "k8s.io/api/core/v1"
"reflect"
"strings"
goSync "sync"
"time"
v1 "k8s.io/api/core/v1"
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/sync"
@@ -43,10 +40,6 @@ import (
"github.com/argoproj/argo-cd/v2/util/stats"
)
var (
CompareStateRepoError = errors.New("failed to get repo objects")
)
type resourceInfoProviderStub struct {
}
@@ -69,7 +62,7 @@ type managedResource struct {
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) *comparisonResult
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
}
@@ -112,11 +105,14 @@ type appStateManager struct {
statusRefreshTimeout time.Duration
resourceTracking argo.ResourceTracking
persistResourceHealth bool
repoErrorCache goSync.Map
repoErrorGracePeriod time.Duration
}
// getRepoObjs will generate the manifests for the given application delegating the
// task to the repo-server. It returns the list of generated manifests as unstructured
// objects. It also returns the full responses from all calls to the repo server as the
// second return value.
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {
ts := stats.NewTimingStats()
helmRepos, err := m.db.ListHelmRepositories(context.Background())
if err != nil {
@@ -353,7 +349,7 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) *comparisonResult {
ts := stats.NewTimingStats()
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
@@ -369,7 +365,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
Revisions: revisions,
},
healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
}, nil
}
} else {
return &comparisonResult{
syncStatus: &v1alpha1.SyncStatus{
@@ -378,7 +374,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
Revision: revisions[0],
},
healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
}, nil
}
}
}
@@ -416,21 +412,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
targetObjs = make([]*unstructured.Unstructured, 0)
msg := fmt.Sprintf("Failed to load target state: %s", err.Error())
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
if firstSeen, ok := m.repoErrorCache.Load(app.Name); ok {
if time.Since(firstSeen.(time.Time)) <= m.repoErrorGracePeriod && !noRevisionCache {
// if the error was first seen within the grace period and this is not a Level 3
// comparison, ignore the error and short circuit
logCtx.Debugf("Ignoring repo error %v, already encountered error in grace period", err.Error())
return nil, CompareStateRepoError
}
} else if !noRevisionCache {
logCtx.Debugf("Ignoring repo error %v, new occurrence", err.Error())
m.repoErrorCache.Store(app.Name, time.Now())
return nil, CompareStateRepoError
}
failedToLoadObjs = true
} else {
m.repoErrorCache.Delete(app.Name)
}
} else {
// Prevent applying local manifests for now when signature verification is enabled
@@ -580,21 +562,16 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
}
// restore comparison using cached diff result if previous comparison was performed for the same revision
revisionChanged := len(manifestInfos) != len(sources) || !reflect.DeepEqual(app.Status.Sync.Revisions, manifestRevisions)
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences})
_, refreshRequested := app.IsRefreshRequested()
noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) || specChanged || revisionChanged
useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, logCtx)
diffConfigBuilder := argodiff.NewDiffConfigBuilder().
WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
WithTracking(appLabelKey, string(trackingMethod))
if noCache {
diffConfigBuilder.WithNoCache()
if useDiffCache {
diffConfigBuilder.WithCache(m.cache, app.InstanceName(m.namespace))
} else {
diffConfigBuilder.WithCache(m.cache, app.GetName())
diffConfigBuilder.WithNoCache()
}
gvkParser, err := m.getGVKParser(app.Spec.Destination.Server)
@@ -798,7 +775,47 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
})
ts.AddCheckpoint("health_ms")
compRes.timings = ts.Timings()
return &compRes, nil
return &compRes
}
// useDiffCache will determine if the diff should be calculated based
// on the existing live state cache or not.
func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sources []v1alpha1.ApplicationSource, app *v1alpha1.Application, manifestRevisions []string, statusRefreshTimeout time.Duration, log *log.Entry) bool {
if noCache {
log.WithField("useDiffCache", "false").Debug("noCache is true")
return false
}
_, refreshRequested := app.IsRefreshRequested()
if refreshRequested {
log.WithField("useDiffCache", "false").Debug("refreshRequested")
return false
}
if app.Status.Expired(statusRefreshTimeout) {
log.WithField("useDiffCache", "false").Debug("app.status.expired")
return false
}
if len(manifestInfos) != len(sources) {
log.WithField("useDiffCache", "false").Debug("manifestInfos len != sources len")
return false
}
revisionChanged := !reflect.DeepEqual(app.Status.GetRevisions(), manifestRevisions)
if revisionChanged {
log.WithField("useDiffCache", "false").Debug("revisionChanged")
return false
}
currentSpec := app.BuildComparedToStatus()
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
if specChanged {
log.WithField("useDiffCache", "false").Debug("specChanged")
return false
}
log.WithField("useDiffCache", "true").Debug("using diff cache")
return true
}
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error {
@@ -854,7 +871,6 @@ func NewAppStateManager(
statusRefreshTimeout time.Duration,
resourceTracking argo.ResourceTracking,
persistResourceHealth bool,
repoErrorGracePeriod time.Duration,
) AppStateManager {
return &appStateManager{
liveStateCache: liveStateCache,
@@ -870,7 +886,6 @@ func NewAppStateManager(
statusRefreshTimeout: statusRefreshTimeout,
resourceTracking: resourceTracking,
persistResourceHealth: persistResourceHealth,
repoErrorGracePeriod: repoErrorGracePeriod,
}
}
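For readers tracking the revert: the grace-period handling removed in the hunks above debounced repeated repo errors per application, so one flaky manifest fetch did not immediately flip an app's status to Unknown. A minimal standalone sketch of that pattern, assuming a `sync.Map` keyed by app name (the type and method names here are hypothetical, and the real logic was additionally gated on `noRevisionCache`):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errRepoCompare = errors.New("failed to compare desired state to live state")

// repoErrorDebouncer suppresses repeated repo errors for a grace period,
// mirroring the repoErrorCache/repoErrorGracePeriod fields removed above.
type repoErrorDebouncer struct {
	firstSeen   sync.Map // app name -> time.Time of first error
	gracePeriod time.Duration
}

// onError returns errRepoCompare (short-circuit) while the app is still
// inside the grace period, and nil once the period has elapsed so the
// failure can finally surface in the comparison result.
func (d *repoErrorDebouncer) onError(app string) error {
	if v, ok := d.firstSeen.Load(app); ok {
		if time.Since(v.(time.Time)) <= d.gracePeriod {
			return errRepoCompare
		}
		return nil // grace period expired: let the error surface
	}
	d.firstSeen.Store(app, time.Now())
	return errRepoCompare
}

// onSuccess clears the error state after a successful comparison,
// as the removed code did with m.repoErrorCache.Delete(app.Name).
func (d *repoErrorDebouncer) onSuccess(app string) {
	d.firstSeen.Delete(app)
}

func main() {
	d := &repoErrorDebouncer{gracePeriod: 50 * time.Millisecond}
	fmt.Println(d.onError("guestbook")) // first error: short-circuit
	fmt.Println(d.onError("guestbook")) // still inside grace period
	time.Sleep(60 * time.Millisecond)
	fmt.Println(d.onError("guestbook")) // expired: nil, comparison proceeds
	d.onSuccess("guestbook")
}
```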

View File

@@ -2,7 +2,6 @@ package controller
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
@@ -11,6 +10,9 @@ import (
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
. "github.com/argoproj/gitops-engine/pkg/utils/testing"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
logrustest "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -38,13 +40,12 @@ func TestCompareAppStateEmpty(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -53,31 +54,6 @@ func TestCompareAppStateEmpty(t *testing.T) {
assert.Len(t, app.Status.Conditions, 0)
}
// TestCompareAppStateRepoError tests the case when CompareAppState notices a repo error
func TestCompareAppStateRepoError(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{manifestResponses: make([]*apiclient.ManifestResponse, 3)}, fmt.Errorf("test repo error"))
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, compRes)
assert.EqualError(t, err, CompareStateRepoError.Error())
// expect to still get compare state error to as inside grace period
compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, compRes)
assert.EqualError(t, err, CompareStateRepoError.Error())
time.Sleep(10 * time.Second)
// expect to not get error as outside of grace period, but status should be unknown
compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Nil(t, err)
assert.Equal(t, compRes.syncStatus.Status, argoappv1.SyncStatusCodeUnknown)
}
// TestCompareAppStateNamespaceMetadataDiffers tests comparison when managed namespace metadata differs
func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
app := newFakeApp()
@@ -102,13 +78,12 @@ func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -151,13 +126,12 @@ func TestCompareAppStateNamespaceMetadataDiffersToManifest(t *testing.T) {
kube.GetResourceKey(ns): ns,
},
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -209,13 +183,12 @@ func TestCompareAppStateNamespaceMetadata(t *testing.T) {
kube.GetResourceKey(ns): ns,
},
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -268,13 +241,12 @@ func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -296,13 +268,12 @@ func TestCompareAppStateMissing(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -328,13 +299,12 @@ func TestCompareAppStateExtra(t *testing.T) {
key: pod,
},
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
@@ -359,13 +329,12 @@ func TestCompareAppStateHook(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 0, len(compRes.resources))
@@ -391,13 +360,12 @@ func TestCompareAppStateSkipHook(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, 1, len(compRes.resources))
@@ -421,14 +389,13 @@ func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -455,13 +422,12 @@ func TestCompareAppStateExtraHook(t *testing.T) {
key: pod,
},
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -484,13 +450,12 @@ func TestAppRevisionsSingleSource(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
app := newFakeApp()
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.NotEmpty(t, compRes.syncStatus.Revision)
@@ -524,13 +489,12 @@ func TestAppRevisionsMultiSource(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
app := newFakeMultiSourceApp()
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Empty(t, compRes.syncStatus.Revision)
@@ -572,13 +536,12 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
kube.GetResourceKey(obj3): obj3,
},
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, 1, len(app.Status.Conditions))
@@ -613,9 +576,8 @@ func TestCompareAppStateManagedNamespaceMetadataWithLiveNsDoesNotGetPruned(t *te
kube.GetResourceKey(ns): ns,
},
}
ctrl := newFakeController(&data, nil)
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false)
assert.Nil(t, err)
ctrl := newFakeController(&data)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.Equal(t, 0, len(app.Status.Conditions))
@@ -669,14 +631,13 @@ func TestSetHealth(t *testing.T) {
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(deployment): deployment,
},
}, nil)
})
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
}
@@ -706,14 +667,13 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
kube.GetResourceKey(deployment): deployment,
kube.GetResourceKey(unstructuredApp): unstructuredApp,
},
}, nil)
})
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
}
@@ -733,7 +693,7 @@ func TestSetManagedResourcesWithOrphanedResources(t *testing.T) {
AppName: "",
},
},
}, nil)
})
tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})
@@ -762,7 +722,7 @@ func TestSetManagedResourcesWithResourcesOfAnotherApp(t *testing.T) {
AppName: "app2",
},
},
}, nil)
})
tree, err := ctrl.setAppManagedResources(app1, &comparisonResult{managedResources: make([]managedResource, 0)})
@@ -781,14 +741,13 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
configMapData: map[string]string{
"resource.customizations": "invalid setting",
},
}, nil)
})
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Equal(t, health.HealthStatusUnknown, compRes.healthStatus.Status)
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
@@ -815,7 +774,7 @@ func TestSetManagedResourcesKnownOrphanedResourceExceptions(t *testing.T) {
ResourceNode: argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Kind: kube.ServiceAccountKind, Name: "kubernetes", Namespace: app.Namespace}},
},
},
}, nil)
})
tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})
@@ -828,7 +787,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app},
}, nil)
})
manager := ctrl.appStateManager.(*appStateManager)
setRevisionHistoryLimit := func(value int) {
i := int64(value)
@@ -923,13 +882,12 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -950,13 +908,12 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -982,13 +939,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1009,13 +965,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1036,13 +991,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1063,13 +1017,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1091,15 +1044,14 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
testProj := signedProj
testProj.Spec.SignatureKeys[0].KeyID = "4AEE18F83AFDEB24"
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1123,13 +1075,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
}
// it doesn't matter for our test whether local manifests are valid
localManifests := []string{"foobar"}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
@@ -1153,13 +1104,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1183,13 +1133,12 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
}
// it doesn't matter for our test whether local manifests are valid
localManifests := []string{""}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "abc123")
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
assert.Nil(t, err)
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -1324,7 +1273,7 @@ func TestIsLiveResourceManaged(t *testing.T) {
kube.GetResourceKey(unmanagedObjWrongGroup): unmanagedObjWrongGroup,
kube.GetResourceKey(unmanagedObjWrongNamespace): unmanagedObjWrongNamespace,
},
}, nil)
})
manager := ctrl.appStateManager.(*appStateManager)
appName := "guestbook"
@@ -1393,3 +1342,252 @@ func TestIsLiveResourceManaged(t *testing.T) {
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
}
func TestUseDiffCache(t *testing.T) {
type fixture struct {
testName string
noCache bool
manifestInfos []*apiclient.ManifestResponse
sources []argoappv1.ApplicationSource
app *argoappv1.Application
manifestRevisions []string
statusRefreshTimeout time.Duration
expectedUseCache bool
}
manifestInfos := func(revision string) []*apiclient.ManifestResponse {
return []*apiclient.ManifestResponse{
{
Manifests: []string{
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"labels\":{\"app.kubernetes.io/instance\":\"httpbin\"},\"name\":\"httpbin-svc\",\"namespace\":\"httpbin\"},\"spec\":{\"ports\":[{\"name\":\"http-port\",\"port\":7777,\"targetPort\":80},{\"name\":\"test\",\"port\":333}],\"selector\":{\"app\":\"httpbin\"}}}",
"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"labels\":{\"app.kubernetes.io/instance\":\"httpbin\"},\"name\":\"httpbin-deployment\",\"namespace\":\"httpbin\"},\"spec\":{\"replicas\":2,\"selector\":{\"matchLabels\":{\"app\":\"httpbin\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"httpbin\"}},\"spec\":{\"containers\":[{\"image\":\"kennethreitz/httpbin\",\"imagePullPolicy\":\"Always\",\"name\":\"httpbin\",\"ports\":[{\"containerPort\":80}]}]}}}}",
},
Namespace: "",
Server: "",
Revision: revision,
SourceType: "Kustomize",
VerifyResult: "",
},
}
}
sources := func() []argoappv1.ApplicationSource {
return []argoappv1.ApplicationSource{
{
RepoURL: "https://some-repo.com",
Path: "argocd/httpbin",
TargetRevision: "HEAD",
},
}
}
app := func(namespace string, revision string, refresh bool, a *argoappv1.Application) *argoappv1.Application {
app := &argoappv1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "httpbin",
Namespace: namespace,
},
Spec: argoappv1.ApplicationSpec{
Source: &argoappv1.ApplicationSource{
RepoURL: "https://some-repo.com",
Path: "argocd/httpbin",
TargetRevision: "HEAD",
},
Destination: argoappv1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "httpbin",
},
Project: "default",
SyncPolicy: &argoappv1.SyncPolicy{
SyncOptions: []string{
"CreateNamespace=true",
"ServerSideApply=true",
},
},
},
Status: argoappv1.ApplicationStatus{
Resources: []argoappv1.ResourceStatus{},
Sync: argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Source: argoappv1.ApplicationSource{
RepoURL: "https://some-repo.com",
Path: "argocd/httpbin",
TargetRevision: "HEAD",
},
Destination: argoappv1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "httpbin",
},
},
Revision: revision,
Revisions: []string{},
},
ReconciledAt: &metav1.Time{
Time: time.Now().Add(-time.Hour),
},
},
}
if refresh {
annotations := make(map[string]string)
annotations[argoappv1.AnnotationKeyRefresh] = string(argoappv1.RefreshTypeNormal)
app.SetAnnotations(annotations)
}
if a != nil {
err := mergo.Merge(app, a, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue)
if err != nil {
t.Fatalf("error merging app: %s", err)
}
}
return app
}
cases := []fixture{
{
testName: "will use diff cache",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, nil),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: true,
},
{
testName: "will use diff cache for multisource",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "", false, &argoappv1.Application{
Spec: argoappv1.ApplicationSpec{
Source: nil,
Sources: argoappv1.ApplicationSources{
{
RepoURL: "multisource repo1",
},
{
RepoURL: "multisource repo2",
},
},
},
Status: argoappv1.ApplicationStatus{
Resources: []argoappv1.ResourceStatus{},
Sync: argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Source: argoappv1.ApplicationSource{},
Sources: argoappv1.ApplicationSources{
{
RepoURL: "multisource repo1",
},
{
RepoURL: "multisource repo2",
},
},
},
Revisions: []string{"rev1", "rev2"},
},
ReconciledAt: &metav1.Time{
Time: time.Now().Add(-time.Hour),
},
},
}),
manifestRevisions: []string{"rev1", "rev2"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: true,
},
{
testName: "will return false if nocache is true",
noCache: true,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, nil),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: false,
},
{
testName: "will return false if requested refresh",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", true, nil),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: false,
},
{
testName: "will return false if status expired",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, nil),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Minute,
expectedUseCache: false,
},
{
testName: "will return false if there is a new revision",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, nil),
manifestRevisions: []string{"rev2"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: false,
},
{
testName: "will return false if app spec repo changed",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, &argoappv1.Application{
Spec: argoappv1.ApplicationSpec{
Source: &argoappv1.ApplicationSource{
RepoURL: "new-repo",
},
},
}),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: false,
},
{
testName: "will return false if app spec IgnoreDifferences changed",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, &argoappv1.Application{
Spec: argoappv1.ApplicationSpec{
IgnoreDifferences: []argoappv1.ResourceIgnoreDifferences{
{
Group: "app/v1",
Kind: "application",
Name: "httpbin",
Namespace: "httpbin",
JQPathExpressions: []string{"."},
},
},
},
}),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: false,
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
// Given
t.Parallel()
logger, _ := logrustest.NewNullLogger()
log := logrus.NewEntry(logger)
// When
useDiffCache := useDiffCache(tc.noCache, tc.manifestInfos, tc.sources, tc.app, tc.manifestRevisions, tc.statusRefreshTimeout, log)
// Then
assert.Equal(t, useDiffCache, tc.expectedUseCache)
})
}
}
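The `app` fixture above overlays per-case overrides onto a base Application with mergo. A minimal standalone sketch of those merge semantics (`WithOverride` plus `WithOverwriteWithEmptyValue`, so even empty override fields win), using toy types rather than the real Application struct:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Source struct{ RepoURL string }

type Spec struct {
	Source  *Source
	Project string
}

func main() {
	base := Spec{Source: &Source{RepoURL: "https://some-repo.com"}, Project: "default"}
	override := Spec{Project: "team-two-project"} // Source deliberately left nil

	// WithOverride lets non-empty override fields replace base fields;
	// WithOverwriteWithEmptyValue lets *empty* override fields (like the
	// nil Source here, mirroring the multisource test case) clear base fields.
	if err := mergo.Merge(&base, override, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue); err != nil {
		panic(err)
	}

	fmt.Println(base.Source == nil, base.Project) // true team-two-project
}
```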

View File

@@ -3,7 +3,6 @@ package controller
import (
"context"
"encoding/json"
goerrors "errors"
"fmt"
"os"
"strconv"
@@ -153,13 +152,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
revisions = []string{revision}
}
// ignore error if CompareStateRepoError, this shouldn't happen as noRevisionCache is true
compareResult, err := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources())
if err != nil && !goerrors.Is(err, CompareStateRepoError) {
state.Phase = common.OperationError
state.Message = err.Error()
return
}
compareResult := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources())
// We now have a concrete commit SHA. Save this in the sync result revision so that we remember
// what we should be syncing to when resuming operations.

View File

@@ -41,7 +41,7 @@ func TestPersistRevisionHistory(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -87,7 +87,7 @@ func TestPersistManagedNamespaceMetadataState(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -118,7 +118,7 @@ func TestPersistRevisionHistoryRollback(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
// Sync with source specified
source := v1alpha1.ApplicationSource{
@@ -172,7 +172,7 @@ func TestSyncComparisonError(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
// Sync with source unspecified
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -217,7 +217,7 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
ctrl := newFakeController(&data)
return &fixture{
project: project,

Binary files not shown: 19 image assets deleted (7.1 KiB to 175 KiB each).
View File

@@ -24,9 +24,10 @@ $ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKE
#### How to Avoid 403 Errors for Missing Applications
All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter
is specified, and the specified Application does not exist, the API will return a `404` error.
All endpoints of the Applications API accept an optional `project` query string parameter. If the parameter is
specified, and the specified Application does not exist, or if the Application does exist but is not in the given
project, the API will return a `404` error.
Additionally, if the `project` query string parameter is specified and the Application exists but is not in
the given `project`, the API will return a `403` error. This is to prevent leaking information about the
existence of Applications to users who do not have access to them.
If the `project` query string parameter is specified, and the Application does not exist, the API will return a `403`
error. This is to prevent leaking information about the existence of Applications to users who do not have access to
them.
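Depending on the Argo CD version, a missing Application queried with `project` set surfaces as either `404` or `403`, as the two paragraphs above describe; a client should treat both as "not visible to me". A minimal client-side sketch, assuming `ARGOCD_SERVER` holds the host:port and `ARGOCD_TOKEN` a valid token (the app and project names are placeholders):

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	server := os.Getenv("ARGOCD_SERVER") // e.g. 127.0.0.1:8088
	token := os.Getenv("ARGOCD_TOKEN")

	// Passing ?project=... scopes the lookup to that project.
	url := "https://" + server + "/api/v1/applications/my-app?project=my-project"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusNotFound, http.StatusForbidden:
		// Either the app does not exist, or it exists in another project;
		// the server deliberately does not distinguish the two for callers.
		fmt.Println("application not visible:", resp.Status)
	default:
		fmt.Println("status:", resp.Status)
	}
}
```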

View File

@@ -66,7 +66,7 @@ make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0
## Public CD
Every commit to master is built and published to `ghcr.io/argoproj/argo-cd/argocd:<version>-<short-sha>`. The list of images is available at
[https://github.com/argoproj/argo-cd/packages](https://github.com/argoproj/argo-cd/packages).
https://github.com/argoproj/argo-cd/packages.
!!! note
GitHub docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read

View File

@@ -36,7 +36,7 @@ registerResourceExtension(component: ExtensionComponent, group: string, kind: st
- `component: ExtensionComponent` is a React component that receives the following properties:
- application: Application - Argo CD Application resource;
- resource: State - the Kubernetes resource object;
- resource: State - the kubernetes resource object;
- tree: ApplicationTree - includes list of all resources that comprise the application;
See properties interfaces in [models.ts](https://github.com/argoproj/argo-cd/blob/master/ui/src/app/shared/models.ts)

View File

@@ -78,21 +78,3 @@ The feature PR must include:
If these criteria are not met by the RC date, the feature will be ineligible for inclusion in the RC series or GA for
that minor release. It will have to wait for the next minor release.
### Security Patch Policy
CVEs in Argo CD code will be patched for all [supported versions](../operator-manual/installation.md#supported-versions).
### Dependencies Lifecycle Policy
Dependencies are evaluated before being introduced to ensure they:
1) are actively maintained
2) are maintained by trustworthy maintainers
These evaluations vary from dependency to dependency.
Dependencies are also scheduled for removal if the project has been deprecated or if the project is no longer maintained.
CVEs in dependencies will be patched for all supported versions if the CVE is applicable and is assessed by Snyk to be
of high or critical severity. Automation generates a [new Snyk scan weekly](../snyk).

View File

@@ -2,14 +2,13 @@
## Developing And Testing
The website is built using `mkdocs` and `mkdocs-material`.
The website is build using `mkdocs` and `mkdocs-material`.
To test:
```bash
make serve-docs
```
Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).
## Deploying

View File

@@ -205,11 +205,10 @@ you should edit your `~/.kube/config` and modify the `server` option to point to
4. Finally, so that you don't have to keep updating your kube-config whenever you spin up a new k3d cluster, add `--api-port $IP:6550` to your **k3d cluster create** command, where $IP is the value from step 1. An example command is provided here:
```
k3d cluster create my-cluster --wait --k3s-arg '--disable=traefik@server:*' --api-port $IP:6550 -p 443:443@loadbalancer
k3d cluster create my-cluster --wait --k3s-server-arg '--disable=traefik' --api-port $IP:6550 -p 443:443@loadbalancer
```
!!!note
For k3d versions less than v5.0.0, the example command flags `--k3s-arg` and `'--disable=traefik@server:*'` should change to `--k3s-server-arg` and `'--disable=traefik'`, respectively.
Starting from k3d v5.0.0 the example command flags `--k3s-server-arg` and `'--disable=traefik'` would have to be changed to `--k3s-arg` and `'--disable=traefik@server:*'`, respectively.
## The development cycle

View File

@@ -53,7 +53,7 @@ meeting:
![Argo CD Architecture](assets/argocd_architecture.png)
Argo CD is implemented as a Kubernetes controller which continuously monitors running applications
Argo CD is implemented as a kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the Git repo).
A deployed application whose live state deviates from the target state is considered `OutOfSync`.
Argo CD reports & visualizes the differences, while providing facilities to automatically or

View File

@@ -20,7 +20,7 @@ Some manual steps will need to be performed by the Argo CD administrator in orde
### Cluster-scoped Argo CD installation
This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will not work with an Argo CD installed in namespace-scoped mode.
This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will *not* work with an Argo CD installed in namespace-scoped mode.
### Switch resource tracking method

View File

@@ -33,6 +33,6 @@ spec:
- jsonPointers:
- /spec/source/targetRevision
- name: some-app
jqExpressions:
jqPathExpressions:
- .spec.source.helm.values
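Both spellings toggled in the hunk above refer to jq path expressions evaluated against the resource; `.spec.source.helm.values` selects the field to exclude from the diff. For illustration, a minimal sketch of evaluating that expression with `github.com/itchyny/gojq` (the input map below is a toy stand-in for an Application):

```go
package main

import (
	"fmt"

	"github.com/itchyny/gojq"
)

func main() {
	// The same expression used in the ignoreDifferences example above.
	query, err := gojq.Parse(".spec.source.helm.values")
	if err != nil {
		panic(err)
	}

	app := map[string]any{
		"spec": map[string]any{
			"source": map[string]any{
				"helm": map[string]any{"values": "replicaCount: 3"},
			},
		},
	}

	// Running the query yields the value the diff logic would ignore.
	iter := query.Run(app)
	for {
		v, ok := iter.Next()
		if !ok {
			break
		}
		if err, isErr := v.(error); isErr {
			panic(err)
		}
		fmt.Println(v) // replicaCount: 3
	}
}
```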

View File

@@ -0,0 +1,9 @@
# Add support for self-signed TLS / Certificates for Gitlab SCM/PR Provider
## Implementation details
### Overview
In order for a self-signed TLS certificate to be used by an ApplicationSet's SCM / PR Gitlab Generator, the certificate needs to be mounted on the applicationset controller. The path of the mounted certificate must be explicitly set using the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH` or, alternatively, using the parameter `--scm-root-ca-path`. The applicationset controller will read the mounted certificate to create the Gitlab client for SCM/PR Providers.
This can be achieved conveniently by setting `applicationsetcontroller.scm.root.ca.path` in the argocd-cmd-params-cm ConfigMap. Be sure to restart the ApplicationSet controller after setting this value.
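Conceptually, the controller reads the file at that path and adds it to the trust pool used by its Gitlab HTTP client. A minimal sketch of that wiring, assuming a PEM-encoded certificate at the configured path (this is illustrative, not the controller's actual code; the Gitlab URL is a placeholder):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

func main() {
	caPath := os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH")

	pem, err := os.ReadFile(caPath)
	if err != nil {
		panic(err)
	}

	// Start from the system pool and append the self-signed root CA,
	// so public Gitlab instances keep working alongside the private one.
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool()
	}
	if !pool.AppendCertsFromPEM(pem) {
		panic("no certificates found in " + caPath)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}

	resp, err := client.Get("https://gitlab.mydomain.com/api/v4/projects")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```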

View File

@@ -35,8 +35,6 @@ kind: ApplicationSet
metadata:
name: myapps
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- scmProvider:
gitea:
@@ -55,6 +53,7 @@ spec:
Therefore, the administrator must restrict the urls of the allowed SCM Providers (example: `https://git.mydomain.com/,https://gitlab.mydomain.com/`) by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS` (argocd-cmd-params-cm key `applicationsetcontroller.allowed.scm.providers`). If another url is used, it will be rejected by the applicationset controller.
For example:
```yaml
apiVersion: v1
@@ -65,14 +64,7 @@ data:
applicationsetcontroller.allowed.scm.providers: https://git.mydomain.com/,https://gitlab.mydomain.com/
```
!!! note
Please note url used in the `api` field of the `ApplicationSet` must match the url declared by the Administrator including the protocol
!!! warning
The allow-list only applies to SCM providers for which the user may configure a custom `api`. Where an SCM or PR
generator does not accept a custom API URL, the provider is implicitly allowed.
If you do not intend to allow users to use the SCM or PR generators, you can disable them entirely by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ALLOW_SCM_PROVIDERS` (argocd-cmd-params-cm key `applicationsetcontroller.allow.scm.providers`) to `false`.
> Please note url used in the `api` field of the `ApplicationSet` must match the url declared by the Administrator including the protocol
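A minimal sketch of the allow-list check the text describes: split the configured value on commas and require the user-supplied `api` url to match one of the entries, protocol included (the real controller's matching rules may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// isAllowedSCMProvider reports whether apiURL is covered by the
// comma-separated allow-list, matching on url prefix including protocol.
func isAllowedSCMProvider(allowList, apiURL string) bool {
	for _, allowed := range strings.Split(allowList, ",") {
		allowed = strings.TrimSpace(allowed)
		if allowed != "" && strings.HasPrefix(apiURL, allowed) {
			return true
		}
	}
	return false
}

func main() {
	allowList := "https://git.mydomain.com/,https://gitlab.mydomain.com/"
	fmt.Println(isAllowedSCMProvider(allowList, "https://gitlab.mydomain.com/api/v4")) // true
	fmt.Println(isAllowedSCMProvider(allowList, "http://gitlab.mydomain.com/"))        // false: wrong protocol
	fmt.Println(isAllowedSCMProvider(allowList, "https://evil.example.com/"))          // false: not listed
}
```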
### Overview
@@ -139,19 +131,17 @@ metadata:
name: team-one-product-one
namespace: team-one-cd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
list:
- name: infra
- id: infra
project: infra-project
- name: team-two
- id: team-two
project: team-two-project
template:
metadata:
name: '{{.name}}-escalation'
spec:
project: "{{.project}}"
template:
metadata:
name: '{{name}}-escalation'
spec:
project: "{{project}}"
```
### ApplicationSet names

View File

@@ -1,6 +1,6 @@
# Cluster Decision Resource Generator
The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced Kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator:
The cluster decision resource generates a list of Argo CD clusters. This is done using [duck-typing](https://pkg.go.dev/knative.dev/pkg/apis/duck), which does not require knowledge of the full shape of the referenced kubernetes resource. The following is an example of a cluster-decision-resource-based ApplicationSet generator:
```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
@@ -8,8 +8,6 @@ metadata:
name: guestbook
namespace: argocd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusterDecisionResource:
# ConfigMap with GVK information for the duck type resource
@@ -28,7 +26,7 @@ spec:
requeueAfterSeconds: 60
template:
metadata:
name: '{{.name}}-guestbook'
name: '{{name}}-guestbook'
spec:
project: "default"
source:
@@ -36,7 +34,7 @@ spec:
targetRevision: HEAD
path: guestbook
destination:
server: '{{.clusterName}}' # 'server' field of the secret
server: '{{clusterName}}' # 'server' field of the secret
namespace: guestbook
```
The `quak` resource, referenced by the ApplicationSet `clusterDecisionResource` generator:
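The `quak` manifest itself sits outside this hunk, but the duck-typing idea is that the generator never needs the resource's full schema: it reads a list of decisions (by default under `status.decisions`, each entry carrying a `clusterName`) through generic accessors. A minimal sketch with `unstructured`, using an inline toy object in place of a live resource:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Stand-in for the duck-typed resource the generator watches;
	// only the status.decisions shape matters, not the Kind.
	quak := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "mallard.io/v1beta1",
		"kind":       "Duck",
		"metadata":   map[string]interface{}{"name": "quak"},
		"status": map[string]interface{}{
			"decisions": []interface{}{
				map[string]interface{}{"clusterName": "cluster-01"},
				map[string]interface{}{"clusterName": "cluster-02"},
			},
		},
	}}

	decisions, found, err := unstructured.NestedSlice(quak.Object, "status", "decisions")
	if err != nil || !found {
		panic(fmt.Sprintf("no decisions: found=%v err=%v", found, err))
	}
	for _, d := range decisions {
		name, _, _ := unstructured.NestedString(d.(map[string]interface{}), "clusterName")
		fmt.Println("target cluster:", name) // cluster-01, cluster-02
	}
}
```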

View File

@@ -39,13 +39,11 @@ metadata:
name: guestbook
namespace: argocd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters: {} # Automatically use all clusters defined within Argo CD
template:
metadata:
name: '{{.name}}-guestbook' # 'name' field of the Secret
name: '{{name}}-guestbook' # 'name' field of the Secret
spec:
project: "my-project"
source:
@@ -53,7 +51,7 @@ spec:
targetRevision: HEAD
path: guestbook
destination:
server: '{{.server}}' # 'server' field of the secret
server: '{{server}}' # 'server' field of the secret
namespace: guestbook
```
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/cluster).*)
@@ -69,8 +67,6 @@ metadata:
name: guestbook
namespace: argocd
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -109,8 +105,6 @@ The cluster generator will automatically target both local and non-local cluster
If you wish to target only remote clusters with your Applications (e.g. you want to exclude the local cluster), then use a cluster selector with labels, for example:
```yaml
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -143,8 +137,6 @@ You may pass additional, arbitrary string key-value pairs via the `values` field
In this example, a `revision` parameter value is passed, based on matching labels on the cluster secret:
```yaml
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -162,16 +154,16 @@ spec:
revision: stable
template:
metadata:
name: '{{.name}}-guestbook'
name: '{{name}}-guestbook'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
# The cluster values field for each generator will be substituted here:
targetRevision: '{{.values.revision}}'
targetRevision: '{{values.revision}}'
path: guestbook
destination:
server: '{{.server}}'
server: '{{server}}'
namespace: guestbook
```
@@ -192,8 +184,6 @@ Extending the example above, we could do something like this:
```yaml
spec:
goTemplate: true
goTemplateOptions: ["missingkey=error"]
generators:
- clusters:
selector:
@@ -202,8 +192,8 @@ spec:
# A key-value map for arbitrary parameters
values:
# If `my-custom-annotation` is in your cluster secret, `revision` will be substituted with it.
revision: '{{index .metadata.annotations "my-custom-annotation"}}'
clusterName: '{{.name}}'
revision: '{{metadata.annotations.my-custom-annotation}}'
clusterName: '{{name}}'
- clusters:
selector:
matchLabels:
@@ -211,19 +201,19 @@ spec:
values:
# production uses a different revision value, for 'stable' branch
revision: stable
clusterName: '{{.name}}'
clusterName: '{{name}}'
template:
metadata:
name: '{{.name}}-guestbook'
name: '{{name}}-guestbook'
spec:
project: "my-project"
source:
repoURL: https://github.com/argoproj/argocd-example-apps/
# The cluster values field for each generator will be substituted here:
targetRevision: '{{.values.revision}}'
targetRevision: '{{values.revision}}'
path: guestbook
destination:
# In this case this is equivalent to just using {{name}}
server: '{{.values.clusterName}}'
server: '{{values.clusterName}}'
namespace: guestbook
```

Some files were not shown because too many files have changed in this diff.