Compare commits


2 Commits

Author           SHA1        Message                                Date
todaywasawesome  1b52b24cea  Bump pyments version to 2.18 (latest)  2024-09-04 09:19:58 -06:00
todaywasawesome  8eb00f51df  Update pygments to 2.15.1              2024-09-04 08:53:03 -06:00
                             (Signed-off-by: todaywasawesome <dan@codefresh.io>)
528 changed files with 12085 additions and 15580 deletions

View File

@@ -31,7 +31,7 @@ jobs:
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- uses: tj-actions/changed-files@e9772d140489982e0e3704fea5ee93d536f1e275 # v45.0.1
- uses: tj-actions/changed-files@c65cd883420fd2eb864698a825fc4162dd94482c # v44.5.7
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
@@ -81,7 +81,7 @@ jobs:
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -151,7 +151,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -215,7 +215,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -308,7 +308,7 @@ jobs:
node-version: '21.6.1'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -348,7 +348,7 @@ jobs:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -448,7 +448,7 @@ jobs:
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -539,4 +539,4 @@ jobs:
exit 0
else
exit 1
fi
fi

View File

@@ -64,7 +64,7 @@ jobs:
git stash pop
- name: Create pull request
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
uses: peter-evans/create-pull-request@4320041ed380b20e97d388d56a7fb4f9b8c20e79 # v7.0.0
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"

View File

@@ -295,7 +295,7 @@ jobs:
if: ${{ env.UPDATE_VERSION == 'true' }}
- name: Create PR to update VERSION on master branch
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
uses: peter-evans/create-pull-request@4320041ed380b20e97d388d56a7fb4f9b8c20e79 # v7.0.0
with:
commit-message: Bump version in master
title: "chore: Bump version in master"

.gitpod.Dockerfile vendored (2 changes)
View File

@@ -1,4 +1,4 @@
FROM gitpod/workspace-full@sha256:230285e0b949e6d728d384b2029a4111db7b9c87c182f22f32a0be9e36b225df
FROM gitpod/workspace-full@sha256:fbff2dce4236535b96de0e94622bbe9a44fba954ca064862004c34e3e08904df
USER root

View File

@@ -43,7 +43,6 @@ packages:
ProjectGetter:
RbacEnforcer:
SettingsGetter:
UserGetter:
github.com/argoproj/argo-cd/v2/util/db:
interfaces:
ArgoDB:
@@ -66,4 +65,4 @@ packages:
SessionServiceClient:
github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster:
interfaces:
ClusterServiceServer:
ClusterServiceServer:

View File

@@ -2,7 +2,6 @@ version: 2
formats: all
mkdocs:
fail_on_warning: false
configuration: mkdocs.yml
python:
install:
- requirements: docs/requirements.txt

View File

@@ -1,10 +1,5 @@
# Changelog
## v2.13.6 (2025-03-21)
### Bug fixes
- fix: handle annotated git tags correctly in repo server cache (#21548) (#21771)
## v2.4.8 (2022-07-29)
### Bug fixes

View File

@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:3f85b7caad41a95462cf5b787d8
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS builder
FROM docker.io/library/golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS builder
RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list
@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.8.0@sha256:bd00c03095f7586432805dbf7989be10361d27987f93de904b1fc003949a4794 AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.8.0@sha256:8ec02324cb37718197de92e51677781be9f1345c709f31a1f44440c6036d24a2 AS argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd

View File

@@ -553,7 +553,7 @@ build-docs-local:
.PHONY: build-docs
build-docs:
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs build'
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'
.PHONY: serve-docs-local
serve-docs-local:
@@ -561,7 +561,7 @@ serve-docs-local:
.PHONY: serve-docs
serve-docs:
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
# Verify that kubectl can connect to your K8s cluster from Docker
.PHONY: verify-kube-connect

View File

@@ -11,7 +11,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [7shifts](https://www.7shifts.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Adfinis](https://adfinis.com)
1. [Adobe](https://www.adobe.com/)
1. [Adventure](https://jp.adventurekk.com/)
1. [Adyen](https://www.adyen.com)
1. [AirQo](https://airqo.net/)
@@ -30,7 +29,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Arctiq Inc.](https://www.arctiq.ca)
2. [Arturia](https://www.arturia.com)
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
1. [Augury](https://www.augury.com/)
1. [Autodesk](https://www.autodesk.com)
1. [Axians ACSP](https://www.axians.fr)
1. [Axual B.V.](https://axual.com)
@@ -41,7 +39,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Beez Innovation Labs](https://www.beezlabs.com/)
1. [Bedag Informatik AG](https://www.bedag.ch/)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [Believable Bots](https://believablebots.io)
1. [BigPanda](https://bigpanda.io)
1. [BioBox Analytics](https://biobox.io)
1. [BMW Group](https://www.bmwgroup.com/)
@@ -210,7 +207,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Moengage](https://www.moengage.com/)
1. [Money Forward](https://corp.moneyforward.com/en/)
1. [MOO Print](https://www.moo.com/)
1. [Mozilla](https://www.mozilla.org)
1. [MTN Group](https://www.mtn.com/)
1. [Municipality of The Hague](https://www.denhaag.nl/)
1. [My Job Glasses](https://myjobglasses.com)

View File

@@ -1 +1 @@
2.13.8
2.13.0

View File

@@ -18,7 +18,6 @@ import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
@@ -33,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -142,7 +140,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// desiredApplications is the main list of all expected Applications from all generators in this appset.
desiredApplications, applicationSetReason, err := template.GenerateApplications(logCtx, applicationSetInfo, r.Generators, r.Renderer, r.Client)
if err != nil {
logCtx.Errorf("unable to generate applications: %v", err)
_ = r.setApplicationSetStatusCondition(ctx,
&applicationSetInfo,
argov1alpha1.ApplicationSetCondition{
@@ -152,8 +149,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
}, parametersGenerated,
)
// In order for the controller SDK to respect RequeueAfter, the error must be nil
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, err
}
parametersGenerated = true
@@ -431,29 +427,20 @@ func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.
if needToUpdateConditions || len(applicationSet.Status.Conditions) < len(newConditions) {
// fetch updated Application Set object before updating it
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
updatedAppset.Status.SetConditions(
newConditions, evaluatedTypes,
)
applicationSet.Status.SetConditions(
newConditions, evaluatedTypes,
)
// Update the newly fetched object with new set of conditions
err := r.Client.Status().Update(ctx, updatedAppset)
if err != nil {
return err
}
updatedAppset.DeepCopyInto(applicationSet)
return nil
})
// Update the newly fetched object with new set of conditions
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil && !apierr.IsNotFound(err) {
return fmt.Errorf("unable to set application set condition: %w", err)
}
@@ -512,9 +499,11 @@ func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argov1
}
func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
return utils.IsNamespaceAllowed(namespaces, object.GetNamespace())
})
return predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
return utils.IsNamespaceAllowed(namespaces, e.Object.GetNamespace())
},
}
}
func appControllerIndexer(rawObj client.Object) []string {
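The two predicate styles in the hunk above are not equivalent: predicate.NewPredicateFuncs applies one filter to create, update, delete, and generic events, while a predicate.Funcs literal that only sets CreateFunc leaves the other event types at their permissive defaults. A minimal sketch of the difference, assuming controller-runtime's predicate and event packages; isAllowed is a hypothetical stand-in for utils.IsNamespaceAllowed:

package sketch

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// isAllowed is a placeholder for the real namespace allow-list check.
func isAllowed(ns string) bool { return ns == "argocd" }

// filterAllEvents applies the check to every event type.
func filterAllEvents() predicate.Predicate {
	return predicate.NewPredicateFuncs(func(obj client.Object) bool {
		return isAllowed(obj.GetNamespace())
	})
}

// filterCreateOnly applies the check to create events only; the unset
// Update, Delete, and Generic funcs default to letting events through.
func filterCreateOnly() predicate.Predicate {
	return predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return isAllowed(e.Object.GetNamespace())
		},
	}
}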
@@ -994,7 +983,7 @@ func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1al
}
func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.ApplicationSet) bool {
return appset.Spec.Strategy != nil && appset.Spec.Strategy.RollingSync != nil && appset.Spec.Strategy.Type == "RollingSync" && len(appset.Spec.Strategy.RollingSync.Steps) > 0
return appset.Spec.Strategy != nil && appset.Spec.Strategy.RollingSync != nil && appset.Spec.Strategy.Type == "RollingSync"
}
func isApplicationHealthy(app argov1alpha1.Application) bool {
@@ -1017,16 +1006,6 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
return healthStatusString, syncStatusString, operationPhaseString
}
func getAppStep(appName string, appStepMap map[string]int) int {
// if an application is not selected by any match expression, it defaults to step -1
step := -1
if appStep, ok := appStepMap[appName]; ok {
// 1-based indexing
step = appStep + 1
}
return step
}
// check the status of each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
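The removed getAppStep helper and the inlined appStepMap[name] + 1 differ for applications that no step selected: a Go map lookup on a missing key returns the zero value, so the inlined form yields step 1 where the helper yielded -1. A self-contained sketch of that difference (the map contents are illustrative):

package main

import "fmt"

// getAppStep mirrors the removed helper: -1 when the app is not selected
// by any match expression, otherwise the 1-based step index.
func getAppStep(appName string, appStepMap map[string]int) int {
	step := -1
	if appStep, ok := appStepMap[appName]; ok {
		step = appStep + 1 // 1-based indexing
	}
	return step
}

func main() {
	steps := map[string]int{"app-a": 0, "app-b": 2}

	fmt.Println(getAppStep("app-a", steps)) // 1
	fmt.Println(getAppStep("app-b", steps)) // 3
	fmt.Println(getAppStep("app-c", steps)) // -1 (not in any step)

	// The inlined form cannot express the missing case: a lookup on an
	// absent key returns 0, so app-c silently reports step 1.
	fmt.Println(steps["app-c"] + 1) // 1
}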
@@ -1046,7 +1025,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
LastTransitionTime: &now,
Message: "No Application status found, defaulting status to Waiting.",
Status: "Waiting",
Step: fmt.Sprint(getAppStep(app.Name, appStepMap)),
Step: fmt.Sprint(appStepMap[app.Name] + 1),
TargetRevisions: app.Status.GetRevisions(),
}
} else {
@@ -1056,7 +1035,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
// upgrade any existing AppStatus that might have been set by an older argo-cd version
// note: currentAppStatus.TargetRevisions may be set to empty list earlier during migrations,
// to prevent other usage of r.Client.Status().Update to fail before reaching here.
if len(currentAppStatus.TargetRevisions) == 0 {
if currentAppStatus.TargetRevisions == nil || len(currentAppStatus.TargetRevisions) == 0 {
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
}
}
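The extra nil check above is redundant in Go: len is defined on a nil slice and returns 0, so the two guards behave identically. A tiny sketch:

package main

import "fmt"

func main() {
	var revisions []string // nil slice

	fmt.Println(revisions == nil) // true
	fmt.Println(len(revisions))   // 0; len is defined on nil slices

	// Equivalent guards; the nil comparison adds nothing for slices.
	fmt.Println(revisions == nil || len(revisions) == 0) // true
	fmt.Println(len(revisions) == 0)                     // true
}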
@@ -1071,7 +1050,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
}
@@ -1089,14 +1068,14 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
}
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
}
}
@@ -1105,7 +1084,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
}
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
@@ -1113,7 +1092,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
}
appStatuses = append(appStatuses, currentAppStatus)
@@ -1134,12 +1113,14 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
// if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" {
updateCountMap := []int{}
totalCountMap := []int{}
length := len(applicationSet.Spec.Strategy.RollingSync.Steps)
length := 0
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
}
for s := 0; s < length; s++ {
updateCountMap = append(updateCountMap, 0)
totalCountMap = append(totalCountMap, 0)
@@ -1149,8 +1130,10 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
for _, appStatus := range applicationSet.Status.ApplicationStatus {
totalCountMap[appStepMap[appStatus.Application]] += 1
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
updateCountMap[appStepMap[appStatus.Application]] += 1
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
updateCountMap[appStepMap[appStatus.Application]] += 1
}
}
}
@@ -1175,7 +1158,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap), applicationSet.Name)
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
}
}
@@ -1184,7 +1167,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
appStatus.LastTransitionTime = &now
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
appStatus.Step = fmt.Sprint(getAppStep(appStatus.Application, appStepMap))
appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)
updateCountMap[appStepMap[appStatus.Application]] += 1
}
@@ -1266,29 +1249,15 @@ func (r *ApplicationSetReconciler) migrateStatus(ctx context.Context, appset *ar
}
if update {
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
if err := r.Client.Status().Update(ctx, appset); err != nil {
return fmt.Errorf("unable to set application set status: %w", err)
}
if err := r.Get(ctx, namespacedName, appset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
updatedAppset.Status.ApplicationStatus = appset.Status.ApplicationStatus
// Update the newly fetched object with new set of ApplicationStatus
err := r.Client.Status().Update(ctx, updatedAppset)
if err != nil {
return err
}
updatedAppset.DeepCopyInto(appset)
return nil
})
if err != nil && !apierr.IsNotFound(err) {
return fmt.Errorf("unable to set application set condition: %w", err)
return fmt.Errorf("error fetching updated application set: %w", err)
}
}
return nil
@@ -1302,35 +1271,22 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
for _, status := range statusMap {
statuses = append(statuses, status)
}
sort.Slice(statuses, func(i, j int) bool {
return statuses[i].Name < statuses[j].Name
})
appset.Status.Resources = statuses
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
updatedAppset.Status.Resources = appset.Status.Resources
// Update the newly fetched object with new status resources
err := r.Client.Status().Update(ctx, updatedAppset)
if err != nil {
return err
}
updatedAppset.DeepCopyInto(appset)
return nil
})
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
err := r.Client.Status().Update(ctx, appset)
if err != nil {
logCtx.Errorf("unable to set application set status: %v", err)
return fmt.Errorf("unable to set application set status: %w", err)
}
if err := r.Get(ctx, namespacedName, appset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
return nil
}
@@ -1365,30 +1321,20 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
for i := range applicationStatuses {
applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
}
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
updatedAppset := &argov1alpha1.ApplicationSet{}
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
updatedAppset.Status.ApplicationStatus = applicationSet.Status.ApplicationStatus
// Update the newly fetched object with new set of ApplicationStatus
err := r.Client.Status().Update(ctx, updatedAppset)
if err != nil {
return err
}
updatedAppset.DeepCopyInto(applicationSet)
return nil
})
// Update the newly fetched object with new set of ApplicationStatus
err := r.Client.Status().Update(ctx, applicationSet)
if err != nil {
logCtx.Errorf("unable to set application set status: %v", err)
return fmt.Errorf("unable to set application set status: %w", err)
}
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
if client.IgnoreNotFound(err) != nil {
return nil
}
return fmt.Errorf("error fetching updated application set: %w", err)
}
}
return nil
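Several hunks in this file trade a retry-on-conflict status update for a single direct update (or the reverse): the retrying form re-fetches the object so its resourceVersion is current, mutates the fresh copy, and retries on a 409 Conflict with client-go's default backoff. A minimal sketch of that pattern, assuming the controller-runtime client and client-go's retry package; updateStatusWithRetry and its mutate callback are hypothetical names, not the controller's own API:

package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func updateStatusWithRetry(ctx context.Context, c client.Client, key types.NamespacedName,
	appset *argov1alpha1.ApplicationSet, mutate func(*argov1alpha1.ApplicationSet)) error {
	// DefaultRetry: 5 attempts, 10ms base duration, factor 1, jitter 0.1.
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Mutate a freshly fetched copy so the resourceVersion is current;
		// updating a stale copy is what produces the 409 Conflict.
		latest := &argov1alpha1.ApplicationSet{}
		if err := c.Get(ctx, key, latest); err != nil {
			return err
		}
		mutate(latest)
		if err := c.Status().Update(ctx, latest); err != nil {
			return err // a Conflict error here triggers another attempt
		}
		latest.DeepCopyInto(appset) // keep the caller's copy in sync
		return nil
	})
}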

File diff suppressed because it is too large.

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -5,7 +5,7 @@
replicaCount: 1
image:
repository: quay.io/argoprojlabs/argocd-e2e-container
repository: gcr.io/heptio-images/ks-guestbook-demo
tag: 0.1
pullPolicy: IfNotPresent

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -14,7 +14,7 @@ spec:
app: guestbook-ui
spec:
containers:
- image: quay.io/argoprojlabs/argocd-e2e-container:0.2
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -218,7 +218,7 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
res = append(res, params)
}
} else {
log.Warningf("clusterDecisionResource status.%s missing", statusListKey)
log.Warningf("clusterDecisionResource status." + statusListKey + " missing")
return nil, nil
}

View File

@@ -78,7 +78,7 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
return nil, fmt.Errorf("error getting project %s: %w", project, err)
}
// we need to verify the signature on the Git revision if GPG is enabled
verifyCommit = len(appProject.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled()
verifyCommit = appProject.Spec.SignatureKeys != nil && len(appProject.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled()
}
var err error

View File

@@ -694,7 +694,7 @@ func TestPluginGenerateParams(t *testing.T) {
require.NoError(t, err)
gotJson, err := json.Marshal(got)
require.NoError(t, err)
assert.JSONEq(t, string(expectedJson), string(gotJson))
assert.Equal(t, string(expectedJson), string(gotJson))
}
})
}
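The two assertions above differ in strictness: assert.Equal compares the JSON strings byte for byte, while assert.JSONEq unmarshals both sides and compares them semantically, so key order and whitespace stop mattering. A minimal testify sketch:

package sketch

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestJSONEqIgnoresFormatting(t *testing.T) {
	expected := `{"name": "app", "step": 1}`
	actual := `{
  "step": 1,
  "name": "app"
}`

	// Byte-for-byte comparison fails on key order and whitespace.
	assert.NotEqual(t, expected, actual)

	// Semantic JSON comparison passes.
	assert.JSONEq(t, expected, actual)
}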

View File

@@ -168,7 +168,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
return pullrequest.NewBitbucketServiceBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
return pullrequest.NewBitbucketServiceBearerToken(ctx, providerConfig.API, appToken, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {

View File

@@ -19,7 +19,7 @@ type BitbucketCloudPullRequest struct {
ID int `json:"id"`
Title string `json:"title"`
Source BitbucketCloudPullRequestSource `json:"source"`
Author BitbucketCloudPullRequestAuthor `json:"author"`
Author string `json:"author"`
}
type BitbucketCloudPullRequestSource struct {
@@ -35,11 +35,6 @@ type BitbucketCloudPullRequestSourceCommit struct {
Hash string `json:"hash"`
}
// Also have display_name and uuid, but don't plan to use them.
type BitbucketCloudPullRequestAuthor struct {
Nickname string `json:"nickname"`
}
type PullRequestResponse struct {
Page int32 `json:"page"`
Size int32 `json:"size"`
@@ -139,7 +134,7 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
Title: pull.Title,
Branch: pull.Source.Branch.Name,
HeadSHA: pull.Source.Commit.Hash,
Author: pull.Author.Nickname,
Author: pull.Author,
})
}
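The two struct shapes above expect different payloads: the nested variant models an author object with a nickname field, the flat variant a plain string. Decoding an object into a string field fails with a json.UnmarshalTypeError. A trimmed-down sketch (struct names are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type prNested struct {
	Author struct {
		Nickname string `json:"nickname"`
	} `json:"author"`
}

type prFlat struct {
	Author string `json:"author"`
}

func main() {
	payload := []byte(`{"author": {"nickname": "testName"}}`)

	var n prNested
	fmt.Println(json.Unmarshal(payload, &n), n.Author.Nickname) // <nil> testName

	// The same object payload cannot decode into a plain string field.
	var f prFlat
	fmt.Println(json.Unmarshal(payload, &f)) // json: cannot unmarshal object ...
}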

View File

@@ -37,9 +37,7 @@ func defaultHandlerCloud(t *testing.T) func(http.ResponseWriter, *http.Request)
"hash": "1a8dd249c04a"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
}
]
}`)
@@ -156,9 +154,7 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
"hash": "1a8dd249c04a"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
},
{
"id": 102,
@@ -172,9 +168,7 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
"hash": "4cf807e67a6d"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
}
]
}`, r.Host))
@@ -197,9 +191,7 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
"hash": "6344d9623e3b"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
}
]
}`, r.Host))
@@ -347,9 +339,7 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
"hash": "1a8dd249c04a"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
},
{
"id": 200,
@@ -363,9 +353,7 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
"hash": "4cf807e67a6d"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
}
]
}`, r.Host))
@@ -388,9 +376,7 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
"hash": "6344d9623e3b"
}
},
"author": {
"nickname": "testName"
}
"author": "testName"
}
]
}`, r.Host))

View File

@@ -46,7 +46,7 @@ func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error
return true, nil
}
return false, fmt.Errorf("%s", resp.Status)
return false, fmt.Errorf(resp.Status)
}
var _ SCMProviderService = &BitBucketCloudProvider{}
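The difference between the two lines above: fmt.Errorf(resp.Status) uses a runtime string as the format string, so any % characters in it are interpreted as verbs (and go vet reports a non-constant format string), while fmt.Errorf("%s", resp.Status) passes it as plain data. A small sketch:

package main

import "fmt"

func main() {
	// A status line is server-controlled and may contain a % character.
	status := "418 I'm a teapot (100% sure)"

	// Unsafe: the value is interpreted as a format string and gets
	// mangled, e.g. 418 I'm a teapot (100%!s(MISSING)ure).
	fmt.Println(fmt.Errorf(status))

	// Safe: the value is passed as data to a constant format string.
	fmt.Println(fmt.Errorf("%s", status)) // 418 I'm a teapot (100% sure)
}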

View File

@@ -51,12 +51,9 @@ const (
// if we used destination name we infer the server url
// if we used both name and server then we return an invalid spec error
func ValidateDestination(ctx context.Context, dest *appv1.ApplicationDestination, clientset kubernetes.Interface, argoCDNamespace string) error {
if dest.IsServerInferred() && dest.IsNameInferred() {
return fmt.Errorf("application destination can't have both name and server inferred: %s %s", dest.Name, dest.Server)
}
if dest.Name != "" {
if dest.Server == "" {
server, err := getDestinationBy(ctx, dest.Name, clientset, argoCDNamespace, true)
server, err := getDestinationServer(ctx, dest.Name, clientset, argoCDNamespace)
if err != nil {
return fmt.Errorf("unable to find destination server: %w", err)
}
@@ -64,25 +61,14 @@ func ValidateDestination(ctx context.Context, dest *appv1.ApplicationDestination
return fmt.Errorf("application references destination cluster %s which does not exist", dest.Name)
}
dest.SetInferredServer(server)
} else if !dest.IsServerInferred() && !dest.IsNameInferred() {
} else if !dest.IsServerInferred() {
return fmt.Errorf("application destination can't have both name and server defined: %s %s", dest.Name, dest.Server)
}
} else if dest.Server != "" {
if dest.Name == "" {
serverName, err := getDestinationBy(ctx, dest.Server, clientset, argoCDNamespace, false)
if err != nil {
return fmt.Errorf("unable to find destination server: %w", err)
}
if serverName == "" {
return fmt.Errorf("application references destination cluster %s which does not exist", dest.Server)
}
dest.SetInferredName(serverName)
}
}
return nil
}
func getDestinationBy(ctx context.Context, cluster string, clientset kubernetes.Interface, argoCDNamespace string, byName bool) (string, error) {
func getDestinationServer(ctx context.Context, clusterName string, clientset kubernetes.Interface, argoCDNamespace string) (string, error) {
// settingsMgr := settings.NewSettingsManager(context.TODO(), clientset, namespace)
// argoDB := db.NewDB(namespace, settingsMgr, clientset)
// clusterList, err := argoDB.ListClusters(ctx)
@@ -92,17 +78,14 @@ func getDestinationBy(ctx context.Context, cluster string, clientset kubernetes.
}
var servers []string
for _, c := range clusterList.Items {
if byName && c.Name == cluster {
if c.Name == clusterName {
servers = append(servers, c.Server)
}
if !byName && c.Server == cluster {
servers = append(servers, c.Name)
}
}
if len(servers) > 1 {
return "", fmt.Errorf("there are %d clusters with the same name: %v", len(servers), servers)
} else if len(servers) == 0 {
return "", fmt.Errorf("there are no clusters with this name: %s", cluster)
return "", fmt.Errorf("there are no clusters with this name: %s", clusterName)
}
return servers[0], nil
}

View File

@@ -92,12 +92,7 @@ func TestValidateDestination(t *testing.T) {
Namespace: "default",
}
secret := createClusterSecret("my-secret", "minikube", "https://127.0.0.1:6443")
objects := []runtime.Object{}
objects = append(objects, secret)
kubeclientset := fake.NewSimpleClientset(objects...)
appCond := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
appCond := ValidateDestination(context.Background(), &dest, nil, fakeNamespace)
require.NoError(t, appCond)
assert.False(t, dest.IsServerInferred())
})

View File

@@ -229,7 +229,7 @@ spec:
require.NoError(t, err)
yamlExpected, err := yaml.Marshal(tc.expectedApp)
require.NoError(t, err)
assert.YAMLEq(t, string(yamlExpected), string(yamlFound))
assert.Equal(t, string(yamlExpected), string(yamlFound))
})
}
}

View File

@@ -273,7 +273,7 @@ func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *
// b) there IS a syncPolicy, but preserveResourcesOnDeletion is set to false
// See TestRenderTemplateParamsFinalizers in util_test.go for test-based definition of behaviour
if (syncPolicy == nil || !syncPolicy.PreserveResourcesOnDeletion) &&
len(replacedTmpl.ObjectMeta.Finalizers) == 0 {
(replacedTmpl.ObjectMeta.Finalizers == nil || len(replacedTmpl.ObjectMeta.Finalizers) == 0) {
replacedTmpl.ObjectMeta.Finalizers = []string{"resources-finalizer.argocd.argoproj.io"}
}

View File

@@ -1,186 +0,0 @@
{
"ref": "refs/heads/env/dev",
"before": "d5c1ffa8e294bc18c639bfb4e0df499251034414",
"after": "63738bb582c8b540af7bcfc18f87c575c3ed66e0",
"created": false,
"deleted": false,
"forced": true,
"base_ref": null,
"compare": "https://github.com/org/repo/compare/d5c1ffa8e294...63738bb582c8",
"commits": [
{
"id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0",
"tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19",
"distinct": true,
"message": "Add staging-argocd-demo environment",
"timestamp": "2018-05-04T15:40:02-07:00",
"url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0",
"author": {
"name": "Jesse Suen",
"email": "Jesse_Suen@example.com",
"username": "org"
},
"committer": {
"name": "Jesse Suen",
"email": "Jesse_Suen@example.com",
"username": "org"
},
"added": [
"ksapps/test-app/environments/staging-argocd-demo/main.jsonnet",
"ksapps/test-app/environments/staging-argocd-demo/params.libsonnet"
],
"removed": [
],
"modified": [
"ksapps/test-app/app.yaml"
]
}
],
"head_commit": {
"id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0",
"tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19",
"distinct": true,
"message": "Add staging-argocd-demo environment",
"timestamp": "2018-05-04T15:40:02-07:00",
"url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0",
"author": {
"name": "Jesse Suen",
"email": "Jesse_Suen@example.com",
"username": "org"
},
"committer": {
"name": "Jesse Suen",
"email": "Jesse_Suen@example.com",
"username": "org"
},
"added": [
"ksapps/test-app/environments/staging-argocd-demo/main.jsonnet",
"ksapps/test-app/environments/staging-argocd-demo/params.libsonnet"
],
"removed": [
],
"modified": [
"ksapps/test-app/app.yaml"
]
},
"repository": {
"id": 123060978,
"name": "repo",
"full_name": "org/repo",
"owner": {
"name": "org",
"email": "org@users.noreply.github.com",
"login": "org",
"id": 12677113,
"avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/org",
"html_url": "https://github.com/org",
"followers_url": "https://api.github.com/users/org/followers",
"following_url": "https://api.github.com/users/org/following{/other_user}",
"gists_url": "https://api.github.com/users/org/gists{/gist_id}",
"starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/org/subscriptions",
"organizations_url": "https://api.github.com/users/org/orgs",
"repos_url": "https://api.github.com/users/org/repos",
"events_url": "https://api.github.com/users/org/events{/privacy}",
"received_events_url": "https://api.github.com/users/org/received_events",
"type": "User",
"site_admin": false
},
"private": false,
"html_url": "https://github.com/org/repo",
"description": "Test Repository",
"fork": false,
"url": "https://github.com/org/repo",
"forks_url": "https://api.github.com/repos/org/repo/forks",
"keys_url": "https://api.github.com/repos/org/repo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/org/repo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/org/repo/teams",
"hooks_url": "https://api.github.com/repos/org/repo/hooks",
"issue_events_url": "https://api.github.com/repos/org/repo/issues/events{/number}",
"events_url": "https://api.github.com/repos/org/repo/events",
"assignees_url": "https://api.github.com/repos/org/repo/assignees{/user}",
"branches_url": "https://api.github.com/repos/org/repo/branches{/branch}",
"tags_url": "https://api.github.com/repos/org/repo/tags",
"blobs_url": "https://api.github.com/repos/org/repo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/org/repo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/org/repo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/org/repo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/org/repo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/org/repo/languages",
"stargazers_url": "https://api.github.com/repos/org/repo/stargazers",
"contributors_url": "https://api.github.com/repos/org/repo/contributors",
"subscribers_url": "https://api.github.com/repos/org/repo/subscribers",
"subscription_url": "https://api.github.com/repos/org/repo/subscription",
"commits_url": "https://api.github.com/repos/org/repo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/org/repo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/org/repo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/org/repo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/org/repo/contents/{+path}",
"compare_url": "https://api.github.com/repos/org/repo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/org/repo/merges",
"archive_url": "https://api.github.com/repos/org/repo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/org/repo/downloads",
"issues_url": "https://api.github.com/repos/org/repo/issues{/number}",
"pulls_url": "https://api.github.com/repos/org/repo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/org/repo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/org/repo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/org/repo/labels{/name}",
"releases_url": "https://api.github.com/repos/org/repo/releases{/id}",
"deployments_url": "https://api.github.com/repos/org/repo/deployments",
"created_at": 1519698615,
"updated_at": "2018-05-04T22:37:55Z",
"pushed_at": 1525473610,
"git_url": "git://github.com/org/repo.git",
"ssh_url": "git@github.com:org/repo.git",
"clone_url": "https://github.com/org/repo.git",
"svn_url": "https://github.com/org/repo",
"homepage": null,
"size": 538,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 1,
"mirror_url": null,
"archived": false,
"open_issues_count": 0,
"license": null,
"forks": 1,
"open_issues": 0,
"watchers": 0,
"default_branch": "master",
"stargazers": 0,
"master_branch": "master"
},
"pusher": {
"name": "org",
"email": "org@users.noreply.github.com"
},
"sender": {
"login": "org",
"id": 12677113,
"avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/org",
"html_url": "https://github.com/org",
"followers_url": "https://api.github.com/users/org/followers",
"following_url": "https://api.github.com/users/org/following{/other_user}",
"gists_url": "https://api.github.com/users/org/gists{/gist_id}",
"starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/org/subscriptions",
"organizations_url": "https://api.github.com/users/org/orgs",
"repos_url": "https://api.github.com/users/org/repos",
"events_url": "https://api.github.com/users/org/events{/privacy}",
"received_events_url": "https://api.github.com/users/org/received_events",
"type": "User",
"site_admin": false
}
}

View File

@@ -19,7 +19,6 @@ import (
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
"github.com/argoproj/argo-cd/v2/util/webhook"
"github.com/go-playground/webhooks/v6/azuredevops"
"github.com/go-playground/webhooks/v6/github"
@@ -191,6 +190,11 @@ func (h *WebhookHandler) Handler(w http.ResponseWriter, r *http.Request) {
}
}
func parseRevision(ref string) string {
refParts := strings.SplitN(ref, "/", 3)
return refParts[len(refParts)-1]
}
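parseRevision above splits at most twice, so everything after the refs/<type>/ prefix is preserved, including slashes inside a branch name. A standalone sketch of the same logic:

package main

import (
	"fmt"
	"strings"
)

// parseRevision keeps slashes in the name because SplitN stops after
// the first two separators.
func parseRevision(ref string) string {
	refParts := strings.SplitN(ref, "/", 3)
	return refParts[len(refParts)-1]
}

func main() {
	fmt.Println(parseRevision("refs/heads/env/dev")) // env/dev
	fmt.Println(parseRevision("refs/tags/v3.14.1"))  // v3.14.1
	fmt.Println(parseRevision("main"))               // main
}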
func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo {
var (
webURL string
@@ -200,16 +204,16 @@ func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo {
switch payload := payload.(type) {
case github.PushPayload:
webURL = payload.Repository.HTMLURL
revision = webhook.ParseRevision(payload.Ref)
revision = parseRevision(payload.Ref)
touchedHead = payload.Repository.DefaultBranch == revision
case gitlab.PushEventPayload:
webURL = payload.Project.WebURL
revision = webhook.ParseRevision(payload.Ref)
revision = parseRevision(payload.Ref)
touchedHead = payload.Project.DefaultBranch == revision
case azuredevops.GitPushEvent:
// See: https://learn.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#git.push
webURL = payload.Resource.Repository.RemoteURL
revision = webhook.ParseRevision(payload.Resource.RefUpdates[0].Name)
revision = parseRevision(payload.Resource.RefUpdates[0].Name)
touchedHead = payload.Resource.RefUpdates[0].Name == payload.Resource.Repository.DefaultBranch
// unfortunately, Azure DevOps doesn't provide a list of changed files
default:
@@ -369,12 +373,12 @@ func shouldRefreshPluginGenerator(gen *v1alpha1.PluginGenerator) bool {
}
func genRevisionHasChanged(gen *v1alpha1.GitGenerator, revision string, touchedHead bool) bool {
targetRev := webhook.ParseRevision(gen.Revision)
targetRev := parseRevision(gen.Revision)
if targetRev == "HEAD" || targetRev == "" { // revision is head
return touchedHead
}
return targetRev == revision || gen.Revision == revision
return targetRev == revision
}
func gitGeneratorUsesURL(gen *v1alpha1.GitGenerator, webURL string, repoRegexp *regexp.Regexp) bool {

View File

@@ -67,15 +67,6 @@ func TestWebhookHandler(t *testing.T) {
expectedStatusCode: http.StatusOK,
expectedRefresh: true,
},
{
desc: "WebHook from a GitHub repository via Commit shorthand",
headerKey: "X-GitHub-Event",
headerValue: "push",
payloadFile: "github-commit-event-feature-branch.json",
effectedAppSets: []string{"github-shorthand", "matrix-pull-request-github-plugin", "plugin"},
expectedStatusCode: http.StatusOK,
expectedRefresh: true,
},
{
desc: "WebHook from a GitHub repository via Commit to branch",
headerKey: "X-GitHub-Event",
@@ -201,7 +192,6 @@ func TestWebhookHandler(t *testing.T) {
fakeAppWithGitGenerator("git-github", namespace, "https://github.com/org/repo"),
fakeAppWithGitGenerator("git-gitlab", namespace, "https://gitlab/group/name"),
fakeAppWithGitGenerator("git-azure-devops", namespace, "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"),
fakeAppWithGitGeneratorWithRevision("github-shorthand", namespace, "https://github.com/org/repo", "env/dev"),
fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "CodErTOcat", "Hello-World"),
fakeAppWithGitlabPullRequestGenerator("pull-request-gitlab", namespace, "100500"),
fakeAppWithAzureDevOpsPullRequestGenerator("pull-request-azure-devops", namespace, "DefaultCollection", "Fabrikam"),
@@ -312,62 +302,14 @@ func mockGenerators() map[string]generators.Generator {
}
func TestGenRevisionHasChanged(t *testing.T) {
type args struct {
gen *v1alpha1.GitGenerator
revision string
touchedHead bool
}
tests := []struct {
name string
args args
want bool
}{
{name: "touchedHead", args: args{
gen: &v1alpha1.GitGenerator{},
revision: "main",
touchedHead: true,
}, want: true},
{name: "didntTouchHead", args: args{
gen: &v1alpha1.GitGenerator{},
revision: "main",
touchedHead: false,
}, want: false},
{name: "foundEqualShort", args: args{
gen: &v1alpha1.GitGenerator{Revision: "dev"},
revision: "dev",
touchedHead: true,
}, want: true},
{name: "foundEqualLongGen", args: args{
gen: &v1alpha1.GitGenerator{Revision: "refs/heads/dev"},
revision: "dev",
touchedHead: true,
}, want: true},
{name: "foundNotEqualLongGen", args: args{
gen: &v1alpha1.GitGenerator{Revision: "refs/heads/dev"},
revision: "main",
touchedHead: true,
}, want: false},
{name: "foundNotEqualShort", args: args{
gen: &v1alpha1.GitGenerator{Revision: "dev"},
revision: "main",
touchedHead: false,
}, want: false},
{name: "foundEqualTag", args: args{
gen: &v1alpha1.GitGenerator{Revision: "v3.14.1"},
revision: "v3.14.1",
touchedHead: false,
}, want: true},
{name: "foundEqualTagLongGen", args: args{
gen: &v1alpha1.GitGenerator{Revision: "refs/tags/v3.14.1"},
revision: "v3.14.1",
touchedHead: false,
}, want: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, genRevisionHasChanged(tt.args.gen, tt.args.revision, tt.args.touchedHead), "genRevisionHasChanged(%v, %v, %v)", tt.args.gen, tt.args.revision, tt.args.touchedHead)
})
}
assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{}, "master", true))
assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{}, "master", false))
assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "dev"}, "dev", true))
assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "dev"}, "master", false))
assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "refs/heads/dev"}, "dev", true))
assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "refs/heads/dev"}, "master", false))
}
func fakeAppWithGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
@@ -389,12 +331,6 @@ func fakeAppWithGitGenerator(name, namespace, repo string) *v1alpha1.Application
}
}
func fakeAppWithGitGeneratorWithRevision(name, namespace, repo, revision string) *v1alpha1.ApplicationSet {
appSet := fakeAppWithGitGenerator(name, namespace, repo)
appSet.Spec.Generators[0].Git.Revision = revision
return appSet
}
func fakeAppWithGitlabPullRequestGenerator(name, namespace, projectId string) *v1alpha1.ApplicationSet {
return &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -775,7 +711,7 @@ func fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator(name, namespace
func newFakeClient(ns string) *kubefake.Clientset {
s := runtime.NewScheme()
s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.ApplicationSet{})
return kubefake.NewClientset(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "argocd-cm", Namespace: ns, Labels: map[string]string{
return kubefake.NewSimpleClientset(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "argocd-cm", Namespace: ns, Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
}}}, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{

assets/swagger.json generated (69 changes)
View File

@@ -1990,39 +1990,6 @@
}
}
},
"/api/v1/applicationsets/generate": {
"post": {
"tags": [
"ApplicationSetService"
],
"summary": "Generate generates",
"operationId": "ApplicationSetService_Generate",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/applicationsetApplicationSetGenerateRequest"
}
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/applicationsetApplicationSetGenerateResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/applicationsets/{name}": {
"get": {
"tags": [
@@ -4749,12 +4716,6 @@
"help": {
"$ref": "#/definitions/clusterHelp"
},
"impersonationEnabled": {
"type": "boolean"
},
"installationID": {
"type": "string"
},
"kustomizeOptions": {
"$ref": "#/definitions/v1alpha1KustomizeOptions"
},
@@ -5976,13 +5937,6 @@
"type": "string",
"title": "Description contains optional project description"
},
"destinationServiceAccounts": {
"description": "DestinationServiceAccounts holds information about the service accounts to be impersonated for the application sync operation for each destination.",
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1ApplicationDestinationServiceAccount"
}
},
"destinations": {
"type": "array",
"title": "Destinations contains list of destinations available for deployment",
@@ -6114,24 +6068,6 @@
}
}
},
"v1alpha1ApplicationDestinationServiceAccount": {
"description": "ApplicationDestinationServiceAccount holds information about the service account to be impersonated for the application sync operation.",
"type": "object",
"properties": {
"defaultServiceAccount": {
"type": "string",
"title": "DefaultServiceAccount to be used for impersonation during the sync operation"
},
"namespace": {
"description": "Namespace specifies the target namespace for the application's resources.",
"type": "string"
},
"server": {
"description": "Server specifies the URL of the target cluster's Kubernetes control plane API.",
"type": "string"
}
}
},
"v1alpha1ApplicationList": {
"type": "object",
"title": "ApplicationList is list of Application resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
@@ -9203,11 +9139,6 @@
"description": "SyncOperation contains details about a sync operation.",
"type": "object",
"properties": {
"autoHealAttemptsCount": {
"type": "integer",
"format": "int64",
"title": "SelfHealAttemptsCount contains the number of auto-heal attempts"
},
"dryRun": {
"type": "boolean",
"title": "DryRun specifies to perform a `kubectl apply --dry-run` without actually performing the sync"

View File

@@ -13,7 +13,6 @@ import (
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -25,7 +24,6 @@ import (
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
@@ -58,16 +56,12 @@ func NewCommand() *cobra.Command {
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
selfHealBackoffTimeoutSeconds int
selfHealBackoffFactor int
selfHealBackoffCapSeconds int
statusProcessors int
operationProcessors int
glogLevel int
metricsPort int
metricsCacheExpiration time.Duration
metricsAplicationLabels []string
metricsAplicationConditions []string
kubectlParallelismLimit int64
cacheSource func() (*appstatecache.Cache, error)
redisClient *redis.Client
@@ -83,9 +77,6 @@ func NewCommand() *cobra.Command {
enableDynamicClusterDistribution bool
serverSideDiff bool
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
// argocd k8s event logging flag
enableK8sEvent []string
)
command := cobra.Command{
Use: cliName,
@@ -160,14 +151,6 @@ func NewCommand() *cobra.Command {
kubectl := kubeutil.NewKubectl()
clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
var selfHealBackoff *wait.Backoff
if selfHealBackoffTimeoutSeconds != 0 {
selfHealBackoff = &wait.Backoff{
Duration: time.Duration(selfHealBackoffTimeoutSeconds) * time.Second,
Factor: float64(selfHealBackoffFactor),
Cap: time.Duration(selfHealBackoffCapSeconds) * time.Second,
}
}
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
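The removed block above wires the three self-heal flags into a client-go wait.Backoff: Duration is the initial delay, Factor the multiplier, and Cap the ceiling, with a zero timeout leaving the pointer nil to disable backoff. A small sketch of how such a backoff steps, using the defaults from the flag definitions below (2s, factor 3, 300s cap); the Steps field is an assumption added so Step() advances:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 2 * time.Second, // initial delay between heal attempts
		Factor:   3,               // multiplier applied after each attempt
		Cap:      5 * time.Minute, // ceiling on the computed delay
		Steps:    5,               // how many times Step() may advance
	}

	// Successive delays: 2s, 6s, 18s, 54s, 162s (the 5m cap is not hit yet).
	for i := 0; i < 5; i++ {
		fmt.Println(backoff.Step())
	}
}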
@@ -180,12 +163,10 @@ func NewCommand() *cobra.Command {
hardResyncDuration,
time.Duration(appResyncJitter)*time.Second,
time.Duration(selfHealTimeoutSeconds)*time.Second,
selfHealBackoff,
time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
metricsCacheExpiration,
metricsAplicationLabels,
metricsAplicationConditions,
kubectlParallelismLimit,
persistResourceHealth,
clusterSharding,
@@ -194,7 +175,6 @@ func NewCommand() *cobra.Command {
serverSideDiff,
enableDynamicClusterDistribution,
ignoreNormalizerOpts,
enableK8sEvent,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -244,15 +224,11 @@ func NewCommand() *cobra.Command {
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 0, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffTimeoutSeconds, "self-heal-backoff-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_TIMEOUT_SECONDS", 2, 0, math.MaxInt32), "Specifies initial timeout of exponential backoff between self heal attempts")
command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric")
command.Flags().StringSliceVar(&metricsAplicationConditions, "metrics-application-conditions", []string{}, "List of Application conditions that will be added to the argocd_application_conditions metric")
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
command.Flags().BoolVar(&otlpInsecure, "otlp-insecure", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_INSECURE", true), "OpenTelemetry collector insecure mode")
command.Flags().StringToStringVar(&otlpHeaders, "otlp-headers", env.ParseStringToStringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_HEADERS", map[string]string{}, ","), "List of OpenTelemetry collector extra headers sent with traces, headers are comma-separated key-value pairs(e.g. key1=value1,key2=value2)")
@@ -272,9 +248,6 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
// argocd k8s event logging flag
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {
redisClient = client
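
On the left-hand side of the hunks above, the --self-heal-backoff-timeout-seconds, --self-heal-backoff-factor and --self-heal-backoff-cap-seconds flags feed a k8s.io/apimachinery wait.Backoff. A minimal sketch of how those defaults (2s initial, factor 3, 300s cap) play out; the Steps field is an assumption added here so Step() keeps advancing, since the hunk sets only Duration, Factor and Cap:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Defaults from the flags above: 2s initial timeout, factor 3, 300s cap.
	backoff := wait.Backoff{
		Duration: 2 * time.Second,
		Factor:   3,
		Cap:      300 * time.Second,
		Steps:    6, // assumed for illustration; not set in the hunk above
	}
	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("self-heal attempt %d: wait %v\n", attempt, backoff.Step())
	}
	// Prints roughly: 2s, 6s, 18s, 54s, 2m42s, then capped at 5m0s.
}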

View File

@@ -270,7 +270,7 @@ func startWebhookServer(webhookHandler *webhook.WebhookHandler, webhookAddr stri
mux := http.NewServeMux()
mux.HandleFunc("/api/webhook", webhookHandler.Handler)
go func() {
log.Infof("Starting webhook server %s", webhookAddr)
log.Info("Starting webhook server")
err := http.ListenAndServe(webhookAddr, mux)
if err != nil {
log.Error(err, "failed to start webhook server")

View File

@@ -1,14 +1,10 @@
package commands
import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
@@ -66,8 +62,7 @@ func NewCommand() *cobra.Command {
Use: "controller",
Short: "Starts Argo CD Notifications controller",
RunE: func(c *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx := c.Context()
vers := common.GetVersion()
namespace, _, err := clientConfig.Namespace()
@@ -151,17 +146,6 @@ func NewCommand() *cobra.Command {
return fmt.Errorf("failed to initialize controller: %w", err)
}
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
s := <-sigCh
log.Printf("got signal %v, attempting graceful shutdown", s)
cancel()
}()
go ctrl.Run(ctx, processorsCount)
<-ctx.Done()
return nil
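
The notifications controller hunk above replaces a hand-rolled signal channel plus WaitGroup with the command's own context. A minimal sketch of that pattern, assuming the entrypoint runs the command through ExecuteContext with a signal-aware context (that wiring sits outside this hunk):

package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"

	"github.com/spf13/cobra"
)

func main() {
	// The context is cancelled on SIGINT/SIGTERM, so RunE can simply use
	// c.Context() instead of managing its own signal channel.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	cmd := &cobra.Command{
		Use: "controller",
		RunE: func(c *cobra.Command, args []string) error {
			<-c.Context().Done() // returns once a signal cancels the context
			fmt.Println("shutting down")
			return nil
		},
	}
	_ = cmd.ExecuteContext(ctx)
}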

View File

@@ -27,7 +27,6 @@ import (
reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
"github.com/argoproj/argo-cd/v2/server"
servercache "github.com/argoproj/argo-cd/v2/server/cache"
"github.com/argoproj/argo-cd/v2/util/argo"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/dex"
@@ -92,9 +91,6 @@ func NewCommand() *cobra.Command {
scmRootCAPath string
allowedScmProviders []string
enableScmProviders bool
// argocd k8s event logging flag
enableK8sEvent []string
)
command := &cobra.Command{
Use: cliName,
@@ -155,7 +151,6 @@ func NewCommand() *cobra.Command {
controllerClient, err := client.New(config, client.Options{Scheme: scheme})
errors.CheckError(err)
controllerClient = client.NewDryRunClient(controllerClient)
controllerClient = client.NewNamespacedClient(controllerClient, namespace)
// Load CA information to use for validating connections to the
// repository server, if strict TLS validation was requested.
@@ -234,7 +229,6 @@ func NewCommand() *cobra.Command {
ApplicationNamespaces: applicationNamespaces,
EnableProxyExtension: enableProxyExtension,
WebhookParallelism: webhookParallelism,
EnableK8sEvent: enableK8sEvent,
}
appsetOpts := server.ApplicationSetOpts{
@@ -309,7 +303,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_SERVER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
// Flags related to the applicationSet component.
command.Flags().StringVar(&scmRootCAPath, "appset-scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")

View File

@@ -190,11 +190,7 @@ func isArgoCDConfigMap(name string) bool {
// specsEqual returns if the spec, data, labels, annotations, and finalizers of the two
// supplied objects are equal, indicating that no update is necessary during importing
func specsEqual(left, right unstructured.Unstructured) bool {
leftAnnotation := left.GetAnnotations()
rightAnnotation := right.GetAnnotations()
delete(leftAnnotation, apiv1.LastAppliedConfigAnnotation)
delete(rightAnnotation, apiv1.LastAppliedConfigAnnotation)
if !reflect.DeepEqual(leftAnnotation, rightAnnotation) {
if !reflect.DeepEqual(left.GetAnnotations(), right.GetAnnotations()) {
return false
}
if !reflect.DeepEqual(left.GetLabels(), right.GetLabels()) {
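
For context on the specsEqual hunk: kubectl apply records the full previous object in the kubectl.kubernetes.io/last-applied-configuration annotation, so two otherwise identical objects can differ there spuriously. A standalone sketch of the left-hand comparison; unlike the hunk, it copies the maps first so the caller's annotations are not mutated:

package main

import (
	"fmt"
	"reflect"

	apiv1 "k8s.io/api/core/v1"
)

// annotationsEqualIgnoringLastApplied drops the last-applied-configuration
// annotation from copies of both maps before comparing them.
func annotationsEqualIgnoringLastApplied(left, right map[string]string) bool {
	l, r := map[string]string{}, map[string]string{}
	for k, v := range left {
		l[k] = v
	}
	for k, v := range right {
		r[k] = v
	}
	delete(l, apiv1.LastAppliedConfigAnnotation)
	delete(r, apiv1.LastAppliedConfigAnnotation)
	return reflect.DeepEqual(l, r)
}

func main() {
	left := map[string]string{apiv1.LastAppliedConfigAnnotation: "{...}", "team": "a"}
	right := map[string]string{"team": "a"}
	fmt.Println(annotationsEqualIgnoringLastApplied(left, right)) // true
}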

View File

@@ -97,7 +97,7 @@ func NewGenAppSpecCommand() *cobra.Command {
argocd admin app generate-spec nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc
# Generate declarative config for a Kustomize app
argocd admin app generate-spec kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image quay.io/argoprojlabs/argocd-e2e-container:0.1
argocd admin app generate-spec kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1
# Generate declarative config for a app using a custom tool:
argocd admin app generate-spec kasane --repo https://github.com/argoproj/argocd-example-apps.git --path plugins/kasane --dest-namespace default --dest-server https://kubernetes.default.svc --config-management-plugin kasane
@@ -387,7 +387,7 @@ func reconcileApplications(
return true
}, func(r *http.Request) error {
return nil
}, []string{}, []string{})
}, []string{})
if err != nil {
return nil, err
}

View File

@@ -136,7 +136,6 @@ func NewImportCommand() *cobra.Command {
dryRun bool
verbose bool
stopOperation bool
ignoreTracking bool
applicationNamespaces []string
applicationsetNamespaces []string
)
@@ -265,13 +264,6 @@ func NewImportCommand() *cobra.Command {
continue
}
}
// If there is a live object, remove the tracking annotations/label that might conflict
// when argo is managed with an application.
if ignoreTracking && exists {
updateTracking(bakObj, &liveObj)
}
if !exists {
isForbidden := false
if !dryRun {
@@ -357,7 +349,6 @@ func NewImportCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed")
command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup")
command.Flags().BoolVar(&ignoreTracking, "ignore-tracking", false, "Do not update the tracking annotation if the resource is already tracked")
command.Flags().BoolVar(&verbose, "verbose", false, "Verbose output (versus only changed output)")
command.Flags().BoolVar(&stopOperation, "stop-operation", false, "Stop any existing operations")
command.Flags().StringSliceVarP(&applicationNamespaces, "application-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs to which import of applications is allowed. If not provided value from '%s' in %s will be used,if it's not defined only applications without an explicit namespace will be imported to the Argo CD namespace", applicationNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
@@ -431,32 +422,3 @@ func updateLive(bak, live *unstructured.Unstructured, stopOperation bool) *unstr
}
return newLive
}
// updateTracking will update the tracking label and annotation in the bak resources to the
// value of the live resource.
func updateTracking(bak, live *unstructured.Unstructured) {
// update the common annotation
bakAnnotations := bak.GetAnnotations()
liveAnnotations := live.GetAnnotations()
if liveAnnotations != nil && bakAnnotations != nil {
if v, ok := liveAnnotations[common.AnnotationKeyAppInstance]; ok {
if _, ok := bakAnnotations[common.AnnotationKeyAppInstance]; ok {
bakAnnotations[common.AnnotationKeyAppInstance] = v
bak.SetAnnotations(bakAnnotations)
}
}
}
// update the common label
// A custom label can be set, but it is impossible to know which instance is managing the application
bakLabels := bak.GetLabels()
liveLabels := live.GetLabels()
if liveLabels != nil && bakLabels != nil {
if v, ok := liveLabels[common.LabelKeyAppInstance]; ok {
if _, ok := bakLabels[common.LabelKeyAppInstance]; ok {
bakLabels[common.LabelKeyAppInstance] = v
bak.SetLabels(bakLabels)
}
}
}
}

View File

@@ -1,87 +0,0 @@
package admin
import (
"testing"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/v2/common"
)
func newBackupObject(trackingValue string, trackingLabel bool, trackingAnnotation bool) *unstructured.Unstructured {
cm := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "my-configmap",
Namespace: "namespace",
},
Data: map[string]string{
"foo": "bar",
},
}
if trackingLabel {
cm.SetLabels(map[string]string{
common.LabelKeyAppInstance: trackingValue,
})
}
if trackingAnnotation {
cm.SetAnnotations(map[string]string{
common.AnnotationKeyAppInstance: trackingValue,
})
}
return kube.MustToUnstructured(&cm)
}
func Test_updateTracking(t *testing.T) {
type args struct {
bak *unstructured.Unstructured
live *unstructured.Unstructured
}
tests := []struct {
name string
args args
expected *unstructured.Unstructured
}{
{
name: "update annotation when present in live",
args: args{
bak: newBackupObject("bak", false, true),
live: newBackupObject("live", false, true),
},
expected: newBackupObject("live", false, true),
},
{
name: "update default label when present in live",
args: args{
bak: newBackupObject("bak", true, true),
live: newBackupObject("live", true, true),
},
expected: newBackupObject("live", true, true),
},
{
name: "do not update if live object does not have tracking",
args: args{
bak: newBackupObject("bak", true, true),
live: newBackupObject("live", false, false),
},
expected: newBackupObject("bak", true, true),
},
{
name: "do not update if bak object does not have tracking",
args: args{
bak: newBackupObject("bak", false, false),
live: newBackupObject("live", true, true),
},
expected: newBackupObject("bak", false, false),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
updateTracking(tt.args.bak, tt.args.live)
assert.Equal(t, tt.expected, tt.args.bak)
})
}
}

View File

@@ -183,12 +183,13 @@ func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset
func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
shard int
replicas int
shardingAlgorithm string
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool
shard int
replicas int
shardingAlgorithm string
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool
redisCompressionStr string
)
command := cobra.Command{
Use: "shards",
@@ -212,7 +213,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
if replicas == 0 {
return
}
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, clientOpts.RedisCompression)
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
errors.CheckError(err)
if len(clusters) == 0 {
return
@@ -233,6 +234,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
// we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
// nolint:errcheck
command.ParseFlags(os.Args[1:])
redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
return &command
}
@@ -464,12 +466,13 @@ func NewClusterDisableNamespacedMode() *cobra.Command {
func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
shard int
replicas int
shardingAlgorithm string
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool
shard int
replicas int
shardingAlgorithm string
clientConfig clientcmd.ClientConfig
cacheSrc func() (*appstatecache.Cache, error)
portForwardRedis bool
redisCompressionStr string
)
command := cobra.Command{
Use: "stats",
@@ -499,7 +502,7 @@ argocd admin cluster stats target-cluster`,
replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
errors.CheckError(err)
}
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, clientOpts.RedisCompression)
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -521,6 +524,7 @@ argocd admin cluster stats target-cluster`,
// we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
// nolint:errcheck
command.ParseFlags(os.Args[1:])
redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
return &command
}
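
Both the shards and stats commands above read --redis-compress ahead of Execute() by parsing os.Args early, because the value feeds loadClusters while the command is still being assembled. A minimal sketch of that pre-parse pattern; the flag name is hard-coded here where the hunks use the cacheutil.CLIFlagRedisCompress constant, and the flag is registered locally rather than by the cache helper:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var redisCompressionStr string
	cmd := &cobra.Command{
		Use: "stats",
		Run: func(c *cobra.Command, args []string) {
			fmt.Println("compression:", redisCompressionStr)
		},
	}
	cmd.Flags().String("redis-compress", "gzip", "gzip or none")

	// Parse once before Execute() so the value is usable during setup;
	// errors are ignored because Execute() parses and validates again.
	// nolint:errcheck
	cmd.ParseFlags(os.Args[1:])
	redisCompressionStr, _ = cmd.Flags().GetString("redis-compress")

	_ = cmd.Execute()
}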

View File

@@ -12,14 +12,17 @@ import (
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
"github.com/argoproj/argo-cd/v2/common"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
)
func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
port int
address string
clientConfig clientcmd.ClientConfig
port int
address string
compressionStr string
clientConfig clientcmd.ClientConfig
)
cmd := &cobra.Command{
Use: "dashboard",
@@ -27,8 +30,10 @@ func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
Run: func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()
compression, err := cache.CompressionTypeFromString(compressionStr)
errors.CheckError(err)
clientOpts.Core = true
errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, clientConfig))
errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression, clientConfig))
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
<-ctx.Done()
},
@@ -45,5 +50,6 @@ $ argocd admin dashboard --redis-compress gzip
clientConfig = cli.AddKubectlFlagsToSet(cmd.Flags())
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address")
cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
return cmd
}

View File

@@ -29,7 +29,7 @@ type rbacTrait struct {
}
// Provide a mapping of short-hand resource names to their RBAC counterparts
var resourceMap = map[string]string{
var resourceMap map[string]string = map[string]string{
"account": rbacpolicy.ResourceAccounts,
"app": rbacpolicy.ResourceApplications,
"apps": rbacpolicy.ResourceApplications,
@@ -53,17 +53,8 @@ var resourceMap = map[string]string{
"repository": rbacpolicy.ResourceRepositories,
}
var projectScoped = map[string]bool{
rbacpolicy.ResourceApplications: true,
rbacpolicy.ResourceApplicationSets: true,
rbacpolicy.ResourceLogs: true,
rbacpolicy.ResourceExec: true,
rbacpolicy.ResourceClusters: true,
rbacpolicy.ResourceRepositories: true,
}
// List of allowed RBAC resources
var validRBACResourcesActions = map[string]actionTraitMap{
var validRBACResourcesActions map[string]actionTraitMap = map[string]actionTraitMap{
rbacpolicy.ResourceAccounts: accountsActions,
rbacpolicy.ResourceApplications: applicationsActions,
rbacpolicy.ResourceApplicationSets: defaultCRUDActions,
@@ -445,15 +436,14 @@ func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPoli
}
}
// Some project scoped resources have a special notation - for simplicity's sake,
// Application resources have a special notation - for simplicity's sake,
// if user gives no sub-resource (or specifies simple '*'), we construct
// the required notation by setting subresource to '*/*'.
if projectScoped[realResource] {
if realResource == rbacpolicy.ResourceApplications {
if subResource == "*" || subResource == "" {
subResource = "*/*"
}
}
if realResource == rbacpolicy.ResourceLogs {
} else if realResource == rbacpolicy.ResourceLogs {
if isLogRbacEnforced != nil && !isLogRbacEnforced() {
return true
}
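
The checkPolicy hunk above normalizes a missing or bare '*' sub-resource to '*/*' before the RBAC policy is evaluated (for any project-scoped resource on the left-hand side, for applications only on the right). A standalone restatement of that rule, not the project's actual helper:

package main

import "fmt"

// normalizeSubResource expands an empty or bare "*" sub-resource to "*/*",
// the notation the policy engine expects for project-scoped resources.
func normalizeSubResource(subResource string) string {
	if subResource == "*" || subResource == "" {
		return "*/*"
	}
	return subResource
}

func main() {
	fmt.Println(normalizeSubResource(""))    // */*
	fmt.Println(normalizeSubResource("*"))   // */*
	fmt.Println(normalizeSubResource("a/b")) // a/b
}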

View File

@@ -235,14 +235,6 @@ func Test_PolicyFromK8s(t *testing.T) {
ok := checkPolicy("log-allow-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, falseLogRbacEnforce)
require.True(t, ok)
})
t.Run("get logs", func(t *testing.T) {
ok := checkPolicy("role:test", "get", "logs", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
require.True(t, ok)
})
t.Run("get logs", func(t *testing.T) {
ok := checkPolicy("role:test", "get", "logs", "", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
require.True(t, ok)
})
t.Run("create exec", func(t *testing.T) {
ok := checkPolicy("role:test", "create", "exec", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
require.True(t, ok)

View File

@@ -138,7 +138,7 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
argocd app create nginx-ingress --repo https://charts.helm.sh/stable --helm-chart nginx-ingress --revision 1.24.3 --dest-namespace default --dest-server https://kubernetes.default.svc
# Create a Kustomize app
argocd app create kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image quay.io/argoprojlabs/argocd-e2e-container:0.1
argocd app create kustomize-guestbook --repo https://github.com/argoproj/argocd-example-apps.git --path kustomize-guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --kustomize-image gcr.io/heptio-images/ks-guestbook-demo:0.1
# Create a MultiSource app while yaml file contains an application with multiple sources
argocd app create guestbook --file <path-to-yaml-file>
@@ -294,7 +294,7 @@ func parentChildDetails(appIf application.ApplicationServiceClient, ctx context.
return mapUidToNode, mapParentToChild, parentNode
}
func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx context.Context, windows *argoappv1.SyncWindows, showOperation bool, showParams bool, sourcePosition int) {
func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx context.Context, windows *argoappv1.SyncWindows, showOperation bool, showParams bool) {
aURL := appURL(ctx, acdClient, app.Name)
printAppSummaryTable(app, aURL, windows)
@@ -309,21 +309,20 @@ func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx
fmt.Println()
printOperationResult(app.Status.OperationState)
}
if showParams {
printParams(app, sourcePosition)
if !app.Spec.HasMultipleSources() && showParams {
printParams(app)
}
}
// NewApplicationGetCommand returns a new instance of an `argocd app get` command
func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
refresh bool
hardRefresh bool
output string
showParams bool
showOperation bool
appNamespace string
sourcePosition int
refresh bool
hardRefresh bool
output string
showParams bool
showOperation bool
appNamespace string
)
command := &cobra.Command{
Use: "get APPNAME",
@@ -344,9 +343,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
# Show application parameters and overrides
argocd app get my-app --show-params
# Show application parameters and overrides for a source at position 1 under spec.sources of app my-app
argocd app get my-app --show-params --source-position 1
# Refresh application data when retrieving
argocd app get my-app --refresh
@@ -377,17 +373,8 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
Refresh: getRefreshType(refresh, hardRefresh),
AppNamespace: &appNs,
})
errors.CheckError(err)
// check for source position if --show-params is set
if app.Spec.HasMultipleSources() && showParams {
if sourcePosition <= 0 {
errors.CheckError(fmt.Errorf("Source position should be specified and must be greater than 0 for applications with multiple sources"))
}
if len(app.Spec.GetSources()) < sourcePosition {
errors.CheckError(fmt.Errorf("Source position should be less than the number of sources in the application"))
}
}
errors.CheckError(err)
pConn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
defer argoio.Close(pConn)
@@ -401,7 +388,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
err := PrintResource(app, output)
errors.CheckError(err)
case "wide", "":
printHeader(acdClient, app, ctx, windows, showOperation, showParams, sourcePosition)
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
if len(app.Status.Resources) > 0 {
fmt.Println()
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -409,14 +396,14 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
_ = w.Flush()
}
case "tree":
printHeader(acdClient, app, ctx, windows, showOperation, showParams, sourcePosition)
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
if len(mapUidToNode) > 0 {
fmt.Println()
printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
}
case "tree=detailed":
printHeader(acdClient, app, ctx, windows, showOperation, showParams, sourcePosition)
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
if len(mapUidToNode) > 0 {
fmt.Println()
@@ -433,7 +420,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only get application from namespace")
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
return command
}
@@ -586,8 +572,8 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
var status string
var allow, deny, inactiveAllows bool
if windows.HasWindows() {
active, err := windows.Active()
if err == nil && active.HasWindows() {
active := windows.Active()
if active.HasWindows() {
for _, w := range *active {
if w.Kind == "deny" {
deny = true
@@ -596,14 +582,13 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
}
}
}
inactiveAllowWindows, err := windows.InactiveAllows()
if err == nil && inactiveAllowWindows.HasWindows() {
if windows.InactiveAllows().HasWindows() {
inactiveAllows = true
}
s := windows.CanSync(true)
if deny || !deny && !allow && inactiveAllows {
s, err := windows.CanSync(true)
if err == nil && s {
if s {
status = "Manual Allowed"
} else {
status = "Sync Denied"
@@ -716,22 +701,9 @@ func truncateString(str string, num int) string {
}
// printParams prints parameters and overrides
func printParams(app *argoappv1.Application, sourcePosition int) {
var source *argoappv1.ApplicationSource
if app.Spec.HasMultipleSources() {
// Get the source by the sourcePosition whose params you'd like to print
source = app.Spec.GetSourcePtrByPosition(sourcePosition)
if source == nil {
source = &argoappv1.ApplicationSource{}
}
} else {
src := app.Spec.GetSource()
source = &src
}
if source.Helm != nil {
printHelmParams(source.Helm)
func printParams(app *argoappv1.Application) {
if app.Spec.GetSource().Helm != nil {
printHelmParams(app.Spec.GetSource().Helm)
}
}
@@ -821,9 +793,9 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
errors.CheckError(err)
},
}
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
cmdutil.AddAppFlags(command, &appOpts)
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Set application parameters in namespace")
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
return command
}
@@ -1283,7 +1255,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
if diffOptions.local != "" {
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
} else if diffOptions.revision != "" || len(diffOptions.revisions) > 0 {
} else if diffOptions.revision != "" || (diffOptions.revisions != nil && len(diffOptions.revisions) > 0) {
var unstructureds []*unstructured.Unstructured
for _, mfst := range diffOptions.res.Manifests {
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
@@ -1376,7 +1348,7 @@ func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[
}
if local, ok := objs[key]; ok || live != nil {
if local != nil && !kube.IsCRD(local) {
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()), argoSettings.GetInstallationID())
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()))
errors.CheckError(err)
}
@@ -1934,7 +1906,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
if len(projects) != 0 {
errMsg += fmt.Sprintf(" projects %v", projects)
}
log.Fatal(errMsg)
log.Fatalf(errMsg)
}
for _, i := range list.Items {
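
The --source-position flag in the hunks above counts from 1 over spec.sources. A standalone sketch of that convention with a local stand-in type; Argo CD's own accessor is GetSourcePtrByPosition, which is not reproduced here:

package main

import "fmt"

type ApplicationSource struct{ Chart string }

// getSourceByPosition returns the source at a 1-based position, matching the
// bounds checks shown in the removed validation above.
func getSourceByPosition(sources []ApplicationSource, position int) (*ApplicationSource, error) {
	if position <= 0 || position > len(sources) {
		return nil, fmt.Errorf("source position must be between 1 and %d", len(sources))
	}
	return &sources[position-1], nil
}

func main() {
	sources := []ApplicationSource{{Chart: "nginx"}, {Chart: "redis"}}
	src, err := getSourceByPosition(sources, 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(src.Chart) // nginx
}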

View File

@@ -918,83 +918,35 @@ func TestPrintAppConditions(t *testing.T) {
}
func TestPrintParams(t *testing.T) {
testCases := []struct {
name string
app *v1alpha1.Application
sourcePosition int
expectedOutput string
}{
{
name: "Single Source application with valid helm parameters",
app: &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "name1",
Value: "value1",
},
{
Name: "name2",
Value: "value2",
},
{
Name: "name3",
Value: "value3",
},
output, _ := captureOutput(func() error {
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "name1",
Value: "value1",
},
{
Name: "name2",
Value: "value2",
},
{
Name: "name3",
Value: "value3",
},
},
},
},
},
sourcePosition: -1,
expectedOutput: "\n\nNAME VALUE\nname1 value1\nname2 value2\nname3 value3\n",
},
{
name: "Multi-source application with a valid Source Position",
app: &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Sources: []v1alpha1.ApplicationSource{
{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "nameA",
Value: "valueA",
},
},
},
},
{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "nameB",
Value: "valueB",
},
},
},
},
},
},
},
sourcePosition: 1,
expectedOutput: "\n\nNAME VALUE\nnameA valueA\n",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
output, _ := captureOutput(func() error {
printParams(tc.app, tc.sourcePosition)
return nil
})
if output != tc.expectedOutput {
t.Fatalf("Incorrect print params output %q, should be %q\n", output, tc.expectedOutput)
}
})
}
printParams(app)
return nil
})
expectation := "\n\nNAME VALUE\nname1 value1\nname2 value2\nname3 value3\n"
if output != expectation {
t.Fatalf("Incorrect print params output %q, should be %q", output, expectation)
}
}

View File

@@ -81,14 +81,14 @@ func Test_PrintResource(t *testing.T) {
return err
})
require.NoError(t, err)
assert.YAMLEq(t, expectYamlSingle, str)
assert.Equal(t, expectYamlSingle, str)
str, err = captureOutput(func() error {
err := PrintResource(testResource, "json")
return err
})
require.NoError(t, err)
assert.JSONEq(t, expectJsonSingle, str)
assert.Equal(t, expectJsonSingle, str)
err = PrintResource(testResource, "unknown")
require.Error(t, err)
@@ -116,28 +116,28 @@ func Test_PrintResourceList(t *testing.T) {
return err
})
require.NoError(t, err)
assert.YAMLEq(t, expectYamlList, str)
assert.Equal(t, expectYamlList, str)
str, err = captureOutput(func() error {
err := PrintResourceList(testResource, "json", false)
return err
})
require.NoError(t, err)
assert.JSONEq(t, expectJsonList, str)
assert.Equal(t, expectJsonList, str)
str, err = captureOutput(func() error {
err := PrintResourceList(testResource2, "yaml", true)
return err
})
require.NoError(t, err)
assert.YAMLEq(t, expectYamlSingle, str)
assert.Equal(t, expectYamlSingle, str)
str, err = captureOutput(func() error {
err := PrintResourceList(testResource2, "json", true)
return err
})
require.NoError(t, err)
assert.JSONEq(t, expectJsonSingle, str)
assert.Equal(t, expectJsonSingle, str)
err = PrintResourceList(testResource, "unknown", false)
require.Error(t, err)
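
The test hunks above switch between assert.Equal and assert.YAMLEq/assert.JSONEq. The practical difference: the Eq variants compare parsed documents, so formatting and key order are irrelevant, while Equal compares raw strings byte for byte. A small sketch, using a throwaway recorder to satisfy testify's TestingT outside a test:

package main

import (
	"fmt"

	"github.com/stretchr/testify/assert"
)

type recorder struct{ failed bool }

func (r *recorder) Errorf(format string, args ...interface{}) { r.failed = true }

func main() {
	r := &recorder{}
	fmt.Println(assert.YAMLEq(r, "a: 1\nb: 2\n", "b: 2\na: 1\n")) // true
	fmt.Println(assert.Equal(r, "a: 1\nb: 2\n", "b: 2\na: 1\n"))  // false
}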

View File

@@ -14,10 +14,8 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeUtil "k8s.io/apimachinery/pkg/util/runtime"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
cache2 "k8s.io/client-go/tools/cache"
@@ -50,7 +48,6 @@ type forwardCacheClient struct {
err error
redisHaProxyName string
redisName string
redisPassword string
}
func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
@@ -67,7 +64,7 @@ func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error)
return
}
redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort), Password: c.redisPassword})
redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort)})
c.client = cache.NewRedisCache(redisClient, time.Hour, c.compression)
})
if c.err != nil {
@@ -129,7 +126,7 @@ func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.R
}
repoServerName := c.repoServerName
repoServererviceLabelSelector := common.LabelKeyComponentRepoServer + "=" + common.LabelValueComponentRepoServer
repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), metaV1.ListOptions{LabelSelector: repoServererviceLabelSelector})
repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), v1.ListOptions{LabelSelector: repoServererviceLabelSelector})
if err != nil {
c.err = err
return
@@ -177,7 +174,7 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
//
// If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
// not start the local server.
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, clientConfig clientcmd.ClientConfig) error {
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType, clientConfig clientcmd.ClientConfig) error {
if clientConfig == nil {
flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
clientConfig = cli.AddKubectlFlagsToSet(flags)
@@ -204,7 +201,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
}
// get rid of logging error handler
runtimeUtil.ErrorHandlers = runtimeUtil.ErrorHandlers[1:]
runtime.ErrorHandlers = runtime.ErrorHandlers[1:]
cli.SetLogLevel(log.ErrorLevel.String())
log.SetLevel(log.ErrorLevel)
os.Setenv(v1alpha1.EnvVarFakeInClusterConfig, "true")
@@ -239,18 +236,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
return fmt.Errorf("error creating kubernetes dynamic clientset: %w", err)
}
scheme := runtime.NewScheme()
err = v1alpha1.AddToScheme(scheme)
if err != nil {
return fmt.Errorf("error adding argo resources to scheme: %w", err)
}
err = corev1.AddToScheme(scheme)
if err != nil {
return fmt.Errorf("error adding corev1 resources to scheme: %w", err)
}
controllerClientset, err := client.New(restConfig, client.Options{
Scheme: scheme,
})
controllerClientset, err := client.New(restConfig, client.Options{})
if err != nil {
return fmt.Errorf("error creating kubernetes controller clientset: %w", err)
}
@@ -265,12 +251,12 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
if err != nil {
return fmt.Errorf("error running miniredis: %w", err)
}
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName}), time.Hour)
redisOptions := &redis.Options{Addr: mr.Addr()}
if err = common.SetOptionalRedisPasswordFromKubeConfig(ctx, kubeClientset, namespace, redisOptions); err != nil {
log.Warnf("Failed to fetch & set redis password for namespace %s: %v", namespace, err)
}
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: cache.RedisCompressionType(clientOpts.RedisCompression), redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName, redisPassword: redisOptions.Password}), time.Hour)
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
EnableGZip: false,
Namespace: namespace,
@@ -321,7 +307,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
// If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
// If we're not in core mode, this function call will do nothing.
err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, nil)
err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone, nil)
if err != nil {
log.Fatal(err)
}

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"os"
"slices"
"strings"
"text/tabwriter"
"time"
@@ -81,8 +80,6 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
command.AddCommand(NewProjectRemoveOrphanedIgnoreCommand(clientOpts))
command.AddCommand(NewProjectAddSourceNamespace(clientOpts))
command.AddCommand(NewProjectRemoveSourceNamespace(clientOpts))
command.AddCommand(NewProjectAddDestinationServiceAccountCommand(clientOpts))
command.AddCommand(NewProjectRemoveDestinationServiceAccountCommand(clientOpts))
return command
}
@@ -802,7 +799,7 @@ func printProjectNames(projects []v1alpha1.AppProject) {
// Print table of project info
func printProjectTable(projects []v1alpha1.AppProject) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\tSIGNATURE-KEYS\tORPHANED-RESOURCES\tDESTINATION-SERVICE-ACCOUNTS\n")
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\tSIGNATURE-KEYS\tORPHANED-RESOURCES\n")
for _, p := range projects {
printProjectLine(w, &p)
}
@@ -858,7 +855,7 @@ func formatOrphanedResources(p *v1alpha1.AppProject) string {
}
func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
var destinations, destinationServiceAccounts, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys string
var destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys string
switch len(p.Spec.Destinations) {
case 0:
destinations = "<none>"
@@ -867,14 +864,6 @@ func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
default:
destinations = fmt.Sprintf("%d destinations", len(p.Spec.Destinations))
}
switch len(p.Spec.DestinationServiceAccounts) {
case 0:
destinationServiceAccounts = "<none>"
case 1:
destinationServiceAccounts = fmt.Sprintf("%s,%s,%s", p.Spec.DestinationServiceAccounts[0].Server, p.Spec.DestinationServiceAccounts[0].Namespace, p.Spec.DestinationServiceAccounts[0].DefaultServiceAccount)
default:
destinationServiceAccounts = fmt.Sprintf("%d destinationServiceAccounts", len(p.Spec.DestinationServiceAccounts))
}
switch len(p.Spec.SourceRepos) {
case 0:
sourceRepos = "<none>"
@@ -903,7 +892,7 @@ func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
default:
signatureKeys = fmt.Sprintf("%d key(s)", len(p.Spec.SignatureKeys))
}
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys, formatOrphanedResources(p), destinationServiceAccounts)
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys, formatOrphanedResources(p))
}
func printProject(p *v1alpha1.AppProject, scopedRepositories []*v1alpha1.Repository, scopedClusters []*v1alpha1.Cluster) {
@@ -1093,122 +1082,3 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
}
return command
}
// NewProjectAddDestinationServiceAccountCommand returns a new instance of an `argocd proj add-destination-service-account` command
func NewProjectAddDestinationServiceAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var serviceAccountNamespace string
buildApplicationDestinationServiceAccount := func(destination string, namespace string, serviceAccount string, serviceAccountNamespace string) v1alpha1.ApplicationDestinationServiceAccount {
if serviceAccountNamespace != "" {
return v1alpha1.ApplicationDestinationServiceAccount{
Server: destination,
Namespace: namespace,
DefaultServiceAccount: fmt.Sprintf("%s:%s", serviceAccountNamespace, serviceAccount),
}
} else {
return v1alpha1.ApplicationDestinationServiceAccount{
Server: destination,
Namespace: namespace,
DefaultServiceAccount: serviceAccount,
}
}
}
command := &cobra.Command{
Use: "add-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT",
Short: "Add project destination's default service account",
Example: templates.Examples(`
# Add project destination service account (SERVICE_ACCOUNT) for a server URL (SERVER) in the specified namespace (NAMESPACE) on the project with name PROJECT
argocd proj add-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT
# Add project destination service account (SERVICE_ACCOUNT) from a different namespace
argocd proj add-destination PROJECT SERVER NAMESPACE SERVICE_ACCOUNT --service-account-namespace <service_account_namespace>
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
if len(args) != 4 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
server := args[1]
namespace := args[2]
serviceAccount := args[3]
if strings.Contains(serviceAccountNamespace, "*") {
log.Fatal("service-account-namespace for DestinationServiceAccount must not contain wildcards")
}
if strings.Contains(serviceAccount, "*") {
log.Fatal("ServiceAccount for DestinationServiceAccount must not contain wildcards")
}
destinationServiceAccount := buildApplicationDestinationServiceAccount(server, namespace, serviceAccount, serviceAccountNamespace)
conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
for _, dest := range proj.Spec.DestinationServiceAccounts {
dstServerExist := destinationServiceAccount.Server != "" && dest.Server == destinationServiceAccount.Server
dstServiceAccountExist := destinationServiceAccount.DefaultServiceAccount != "" && dest.DefaultServiceAccount == destinationServiceAccount.DefaultServiceAccount
if dest.Namespace == destinationServiceAccount.Namespace && dstServerExist && dstServiceAccountExist {
log.Fatal("Specified destination service account is already defined in project")
}
}
proj.Spec.DestinationServiceAccounts = append(proj.Spec.DestinationServiceAccounts, destinationServiceAccount)
_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
},
}
command.Flags().StringVar(&serviceAccountNamespace, "service-account-namespace", "", "Use service-account-namespace as namespace where the service account is present")
return command
}
// NewProjectRemoveDestinationCommand returns a new instance of an `argocd proj remove-destination-service-account` command
func NewProjectRemoveDestinationServiceAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
command := &cobra.Command{
Use: "remove-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT",
Short: "Remove default destination service account from the project",
Example: templates.Examples(`
# Remove the destination service account (SERVICE_ACCOUNT) from the specified destination (SERVER and NAMESPACE combination) on the project with name PROJECT
argocd proj remove-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
if len(args) != 4 {
c.HelpFunc()(c, args)
os.Exit(1)
}
projName := args[0]
server := args[1]
namespace := args[2]
serviceAccount := args[3]
conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
defer argoio.Close(conn)
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName})
errors.CheckError(err)
originalLength := len(proj.Spec.DestinationServiceAccounts)
proj.Spec.DestinationServiceAccounts = slices.DeleteFunc(proj.Spec.DestinationServiceAccounts,
func(destServiceAccount v1alpha1.ApplicationDestinationServiceAccount) bool {
return destServiceAccount.Namespace == namespace &&
destServiceAccount.Server == server &&
destServiceAccount.DefaultServiceAccount == serviceAccount
},
)
if originalLength != len(proj.Spec.DestinationServiceAccounts) {
_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
errors.CheckError(err)
} else {
log.Fatal("Specified destination service account does not exist in project")
}
},
}
return command
}

View File

@@ -352,10 +352,9 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
fmt.Fprintf(w, fmtStr, headers...)
if proj.Spec.SyncWindows.HasWindows() {
for i, window := range proj.Spec.SyncWindows {
isActive, _ := window.Active()
vals := []interface{}{
strconv.Itoa(i),
formatBoolOutput(isActive),
formatBoolOutput(window.Active()),
window.Kind,
window.Schedule,
window.Duration,

View File

@@ -187,7 +187,6 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
command.Flags().StringVar(&gcpServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
command.Flags().BoolVar(&repo.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force basic auth when connecting via HTTP")
command.Flags().StringVar(&repo.Proxy, "proxy-url", "", "If provided, this URL will be used to connect via proxy")
return command
}

View File

@@ -3,8 +3,6 @@ package commands
import (
"fmt"
"github.com/argoproj/argo-cd/v2/util/cache"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
@@ -89,8 +87,6 @@ func NewCommand() *cobra.Command {
command.PersistentFlags().StringVar(&clientOpts.RedisName, "redis-name", env.StringFromEnv(common.EnvRedisName, common.DefaultRedisName), fmt.Sprintf("Name of the Redis deployment; set this or the %s environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisName))
command.PersistentFlags().StringVar(&clientOpts.RepoServerName, "repo-server-name", env.StringFromEnv(common.EnvRepoServerName, common.DefaultRepoServerName), fmt.Sprintf("Name of the Argo CD Repo server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvRepoServerName))
command.PersistentFlags().StringVar(&clientOpts.RedisCompression, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
clientOpts.KubeOverrides = &clientcmd.ConfigOverrides{}
command.PersistentFlags().StringVar(&clientOpts.KubeOverrides.CurrentContext, "kube-context", "", "Directs the command to the given kube-context")

View File

@@ -4,10 +4,10 @@ import (
"os"
"path/filepath"
"github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/spf13/cobra"
_ "go.uber.org/automaxprocs"
appcontroller "github.com/argoproj/argo-cd/v2/cmd/argocd-application-controller/commands"
applicationset "github.com/argoproj/argo-cd/v2/cmd/argocd-applicationset-controller/commands"
cmpserver "github.com/argoproj/argo-cd/v2/cmd/argocd-cmp-server/commands"
@@ -31,12 +31,9 @@ func main() {
if val := os.Getenv(binaryNameEnv); val != "" {
binaryName = val
}
isCLI := false
switch binaryName {
case "argocd", "argocd-linux-amd64", "argocd-darwin-amd64", "argocd-windows-amd64.exe":
command = cli.NewCommand()
isCLI = true
case "argocd-server":
command = apiserver.NewCommand()
case "argocd-application-controller":
@@ -45,24 +42,19 @@ func main() {
command = reposerver.NewCommand()
case "argocd-cmp-server":
command = cmpserver.NewCommand()
isCLI = true
case "argocd-dex":
command = dex.NewCommand()
case "argocd-notifications":
command = notification.NewCommand()
case "argocd-git-ask-pass":
command = gitaskpass.NewCommand()
isCLI = true
case "argocd-applicationset-controller":
command = applicationset.NewCommand()
case "argocd-k8s-auth":
command = k8sauth.NewCommand()
isCLI = true
default:
command = cli.NewCommand()
isCLI = true
}
util.SetAutoMaxProcs(isCLI)
if err := command.Execute(); err != nil {
os.Exit(1)

View File

@@ -9,8 +9,6 @@ import (
"strings"
"time"
"go.uber.org/automaxprocs/maxprocs"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -90,19 +88,6 @@ type AppOptions struct {
ref string
}
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
// It suppresses logs for CLI binaries and logs the setting for services.
func SetAutoMaxProcs(isCLI bool) {
if isCLI {
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
} else {
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
if err != nil {
log.Errorf("Error setting GOMAXPROCS: %v", err)
}
}
}
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")

View File

@@ -1,11 +1,10 @@
package util
import (
"bytes"
"log"
"os"
"testing"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -530,27 +529,3 @@ func TestFilterResources(t *testing.T) {
assert.Nil(t, filteredResources)
})
}
func TestSetAutoMaxProcs(t *testing.T) {
t.Run("CLI mode ignores errors", func(t *testing.T) {
logBuffer := &bytes.Buffer{}
oldLogger := log.Default()
log.SetOutput(logBuffer)
defer log.SetOutput(oldLogger.Writer())
SetAutoMaxProcs(true)
assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
})
t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
logBuffer := &bytes.Buffer{}
oldLogger := log.Default()
log.SetOutput(logBuffer)
defer log.SetOutput(oldLogger.Writer())
SetAutoMaxProcs(false)
assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
})
}

View File

@@ -20,12 +20,11 @@ import (
)
type ProjectOpts struct {
Description string
destinations []string
destinationServiceAccounts []string
Sources []string
SignatureKeys []string
SourceNamespaces []string
Description string
destinations []string
Sources []string
SignatureKeys []string
SourceNamespaces []string
orphanedResourcesEnabled bool
orphanedResourcesWarn bool
@@ -48,8 +47,6 @@ func AddProjFlags(command *cobra.Command, opts *ProjectOpts) {
command.Flags().StringArrayVar(&opts.allowedNamespacedResources, "allow-namespaced-resource", []string{}, "List of allowed namespaced resources")
command.Flags().StringArrayVar(&opts.deniedNamespacedResources, "deny-namespaced-resource", []string{}, "List of denied namespaced resources")
command.Flags().StringSliceVar(&opts.SourceNamespaces, "source-namespaces", []string{}, "List of source namespaces for applications")
command.Flags().StringArrayVar(&opts.destinationServiceAccounts, "dest-service-accounts", []string{},
"Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)")
}
func getGroupKindList(values []string) []v1.GroupKind {
@@ -96,23 +93,6 @@ func (opts *ProjectOpts) GetDestinations() []v1alpha1.ApplicationDestination {
return destinations
}
func (opts *ProjectOpts) GetDestinationServiceAccounts() []v1alpha1.ApplicationDestinationServiceAccount {
destinationServiceAccounts := make([]v1alpha1.ApplicationDestinationServiceAccount, 0)
for _, destStr := range opts.destinationServiceAccounts {
parts := strings.Split(destStr, ",")
if len(parts) != 3 {
log.Fatalf("Expected destination service account of the form: server,namespace, defaultServiceAccount. Received: %s", destStr)
} else {
destinationServiceAccounts = append(destinationServiceAccounts, v1alpha1.ApplicationDestinationServiceAccount{
Server: parts[0],
Namespace: parts[1],
DefaultServiceAccount: parts[2],
})
}
}
return destinationServiceAccounts
}
// GetSignatureKeys TODO: Get configured keys and emit warning when a key is specified that is not configured
func (opts *ProjectOpts) GetSignatureKeys() []v1alpha1.SignatureKey {
signatureKeys := make([]v1alpha1.SignatureKey, 0)
@@ -186,8 +166,6 @@ func SetProjSpecOptions(flags *pflag.FlagSet, spec *v1alpha1.AppProjectSpec, pro
spec.NamespaceResourceBlacklist = projOpts.GetDeniedNamespacedResources()
case "source-namespaces":
spec.SourceNamespaces = projOpts.GetSourceNamespaces()
case "dest-service-accounts":
spec.DestinationServiceAccounts = projOpts.GetDestinationServiceAccounts()
}
})
if flags.Changed("orphaned-resources") || flags.Changed("orphaned-resources-warn") {
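
The removed GetDestinationServiceAccounts above parses each --dest-service-accounts value as a comma-separated triple. A standalone sketch of that format with a local stand-in struct, returning an error where the original calls log.Fatalf:

package main

import (
	"fmt"
	"strings"
)

type DestinationServiceAccount struct {
	Server, Namespace, DefaultServiceAccount string
}

// parseDestinationServiceAccount expects "server,namespace,defaultServiceAccount".
func parseDestinationServiceAccount(s string) (DestinationServiceAccount, error) {
	parts := strings.Split(s, ",")
	if len(parts) != 3 {
		return DestinationServiceAccount{}, fmt.Errorf("expected server,namespace,defaultServiceAccount, got %q", s)
	}
	return DestinationServiceAccount{Server: parts[0], Namespace: parts[1], DefaultServiceAccount: parts[2]}, nil
}

func main() {
	dsa, err := parseDestinationServiceAccount("https://192.168.99.100:8443,default,default-sa")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dsa)
}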

View File

@@ -5,8 +5,6 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
func TestProjectOpts_ResourceLists(t *testing.T) {
@@ -24,27 +22,3 @@ func TestProjectOpts_ResourceLists(t *testing.T) {
[]v1.GroupKind{{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}}, opts.GetDeniedClusterResources(),
)
}
func TestProjectOpts_GetDestinationServiceAccounts(t *testing.T) {
opts := ProjectOpts{
destinationServiceAccounts: []string{
"https://192.168.99.100:8443,test-ns,test-sa",
"https://kubernetes.default.svc.local:6443,guestbook,guestbook-sa",
},
}
assert.ElementsMatch(t,
[]v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://192.168.99.100:8443",
Namespace: "test-ns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.default.svc.local:6443",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
}, opts.GetDestinationServiceAccounts(),
)
}

View File

@@ -45,7 +45,7 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
command.Flags().StringVar(&opts.GithubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application")
command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3")
command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository")
command.Flags().StringVar(&opts.NoProxy, "no-proxy", "", "don't access these targets via proxy")
command.Flags().StringVar(&opts.Proxy, "no-proxy", "", "don't access these targets via proxy")
command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting repository via HTTP")
}

View File

@@ -178,7 +178,6 @@ const (
// AnnotationKeyAppInstance is the Argo CD application name is used as the instance name
AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id"
AnnotationInstallationID = "argocd.argoproj.io/installation-id"
// AnnotationCompareOptions is a comma-separated list of options for comparison
AnnotationCompareOptions = "argocd.argoproj.io/compare-options"

View File

@@ -130,7 +130,6 @@ type ApplicationController struct {
statusHardRefreshTimeout time.Duration
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackOff *wait.Backoff
repoClientset apiclient.Clientset
db db.ArgoDB
settingsMgr *settings_util.SettingsManager
@@ -161,12 +160,10 @@ func NewApplicationController(
appHardResyncPeriod time.Duration,
appResyncJitter time.Duration,
selfHealTimeout time.Duration,
selfHealBackoff *wait.Backoff,
repoErrorGracePeriod time.Duration,
metricsPort int,
metricsCacheExpiration time.Duration,
metricsApplicationLabels []string,
metricsApplicationConditions []string,
kubectlParallelismLimit int64,
persistResourceHealth bool,
clusterSharding sharding.ClusterShardingCache,
@@ -175,7 +172,6 @@ func NewApplicationController(
serverSideDiff bool,
dynamicClusterDistributionEnabled bool,
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
enableK8sEvent []string,
) (*ApplicationController, error) {
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -200,10 +196,9 @@ func NewApplicationController(
statusRefreshJitter: appResyncJitter,
refreshRequestedApps: make(map[string]CompareWith),
refreshRequestedAppsMutex: &sync.Mutex{},
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController, enableK8sEvent),
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackOff: selfHealBackoff,
clusterSharding: clusterSharding,
projByNameCache: sync.Map{},
applicationNamespaces: applicationNamespaces,
@@ -284,7 +279,7 @@ func NewApplicationController(
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels, metricsApplicationConditions)
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
if err != nil {
return nil, err
}
@@ -906,7 +901,7 @@ func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith
key := ctrl.toAppKey(appName)
if compareWith != nil && after != nil {
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, *compareWith), *after)
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, compareWith), *after)
} else {
if compareWith != nil {
ctrl.refreshRequestedAppsMutex.Lock()
@@ -1694,9 +1689,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
app.Status.Summary = tree.GetSummary(app)
}
canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
if canSync {
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
if project.Spec.SyncWindows.Matches(app).CanSync(false) {
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
setOpMs = opMS
if syncErrCond != nil {
app.Status.SetConditions(
@@ -1919,7 +1913,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
}
// autoSync will initiate a sync operation for an application configured with automated sync
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, revisionUpdated bool) (*appv1.ApplicationCondition, time.Duration) {
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) (*appv1.ApplicationCondition, time.Duration) {
logCtx := getAppLog(app)
ts := stats.NewTimingStats()
defer func() {
@@ -1973,7 +1967,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
desiredCommitSHA := syncStatus.Revision
desiredCommitSHAsMS := syncStatus.Revisions
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources(), revisionUpdated)
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources())
ts.AddCheckpoint("already_attempted_sync_ms")
op := appv1.Operation{
Sync: &appv1.SyncOperation{
@@ -1985,9 +1979,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
InitiatedBy: appv1.OperationInitiator{Automated: true},
Retry: appv1.RetryStrategy{Limit: 5},
}
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}
if app.Spec.SyncPolicy.Retry != nil {
op.Retry = *app.Spec.SyncPolicy.Retry
}
@@ -2005,7 +1996,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
return nil, 0
} else if alreadyAttempted && selfHeal {
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
@@ -2032,7 +2022,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
}
if bAllNeedPrune {
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
logCtx.Warn(message)
logCtx.Warnf(message)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
}
}
@@ -2072,26 +2062,17 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
// alreadyAttemptedSync returns whether the most recent sync was already performed
// against the commitSHA and app source config that are currently set on the app
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool, revisionUpdated bool) (bool, synccommon.OperationPhase) {
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool) (bool, synccommon.OperationPhase) {
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
return false, ""
}
if hasMultipleSources {
if revisionUpdated {
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
return false, ""
}
} else {
log.WithField("application", app.Name).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
return false, ""
}
} else {
if revisionUpdated {
log.WithField("application", app.Name).Infof("Executing compare of syncResult.Revision and commitSha because manifest changed: %v", commitSHA)
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false, ""
}
} else {
log.WithField("application", app.Name).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false, ""
}
}
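A condensed sketch of the revision check both variants share (parameter names here are illustrative): a sync counts as already attempted only when the recorded sync result points at the same target revision(s).

package main

import (
	"fmt"
	"reflect"
)

// revisionAlreadyAttempted mirrors the branch structure above: multi-source
// apps compare the full revision slice, single-source apps compare one SHA.
func revisionAlreadyAttempted(hasMultipleSources bool, resultRev, targetRev string, resultRevs, targetRevs []string) bool {
	if hasMultipleSources {
		return reflect.DeepEqual(resultRevs, targetRevs)
	}
	return resultRev == targetRev
}

func main() {
	fmt.Println(revisionAlreadyAttempted(false, "abc123", "abc123", nil, nil))                  // true
	fmt.Println(revisionAlreadyAttempted(true, "", "", []string{"a", "b"}, []string{"a", "c"})) // false
}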
@@ -2124,24 +2105,10 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
}
var retryAfter time.Duration
if ctrl.selfHealBackOff == nil {
if app.Status.OperationState.FinishedAt == nil {
retryAfter = ctrl.selfHealTimeout
} else {
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
}
if app.Status.OperationState.FinishedAt == nil {
retryAfter = ctrl.selfHealTimeout
} else {
backOff := *ctrl.selfHealBackOff
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
var delay time.Duration
for backOff.Steps > 0 {
delay = backOff.Step()
}
if app.Status.OperationState.FinishedAt == nil {
retryAfter = delay
} else {
retryAfter = delay - time.Since(app.Status.OperationState.FinishedAt.Time)
}
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
}
return retryAfter <= 0, retryAfter
}
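The branch dropped in this hunk built exponential self-heal delays on top of k8s.io/apimachinery's wait.Backoff. A standalone sketch of the delay math, using the same Duration/Factor/Cap values as the TestSelfHealExponentialBackoff table below:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	base := wait.Backoff{Duration: 2 * time.Second, Factor: 3, Cap: 5 * time.Minute}
	for attempts := 0; attempts <= 3; attempts++ {
		b := base // copy, so every attempt count starts from the base backoff
		b.Steps = attempts
		var delay time.Duration
		for b.Steps > 0 {
			delay = b.Step() // Step() returns the current duration, then multiplies it by Factor
		}
		fmt.Printf("attempts=%d -> delay %v\n", attempts, delay) // 0s, 2s, 6s, 18s
	}
}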

View File

@@ -4,18 +4,16 @@ import (
"context"
"encoding/json"
"errors"
"fmt"
"testing"
"time"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/utils/ptr"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/argo-cd/v2/common"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
@@ -45,15 +43,12 @@ import (
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/v2/test"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/settings"
)
var testEnableEventList []string = argo.DefaultEnableEventList()
type namespacedResource struct {
v1alpha1.ResourceNode
AppName string
@@ -69,7 +64,6 @@ type fakeData struct {
metricsCacheExpiration time.Duration
applicationNamespaces []string
updateRevisionForPathsResponse *apiclient.UpdateRevisionForPathsResponse
additionalObjs []runtime.Object
}
type MockKubectl struct {
@@ -139,9 +133,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
},
Data: data.configMapData,
}
runtimeObjs := []runtime.Object{&clust, &secret, &cm}
runtimeObjs = append(runtimeObjs, data.additionalObjs...)
kubeClient := fake.NewSimpleClientset(runtimeObjs...)
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}}
ctrl, err := NewApplicationController(
@@ -159,12 +151,10 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
time.Hour,
time.Second,
time.Minute,
nil,
time.Second*10,
common.DefaultPortArgoCDMetrics,
data.metricsCacheExpiration,
[]string{},
[]string{},
0,
true,
nil,
@@ -173,7 +163,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
false,
false,
normalizers.IgnoreNormalizerOpts{},
testEnableEventList,
)
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(1)
@@ -565,7 +554,7 @@ func TestAutoSync(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -586,7 +575,7 @@ func TestMultiSourceSelfHeal(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revisions: []string{"z", "x", "v"},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -602,7 +591,7 @@ func TestMultiSourceSelfHeal(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revisions: []string{"z", "x", "v"},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -618,7 +607,7 @@ func TestAutoSyncNotAllowEmpty(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
assert.NotNil(t, cond)
}
@@ -631,7 +620,7 @@ func TestAutoSyncAllowEmpty(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
assert.Nil(t, cond)
}
@@ -645,7 +634,7 @@ func TestSkipAutoSync(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -660,7 +649,7 @@ func TestSkipAutoSync(t *testing.T) {
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -676,7 +665,7 @@ func TestSkipAutoSync(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -693,7 +682,7 @@ func TestSkipAutoSync(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -719,7 +708,7 @@ func TestSkipAutoSync(t *testing.T) {
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -735,7 +724,7 @@ func TestSkipAutoSync(t *testing.T) {
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{
{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync, RequiresPruning: true},
}, true)
})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -771,7 +760,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
Source: *app.Spec.Source.DeepCopy(),
},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -814,7 +803,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
@@ -1385,25 +1374,6 @@ func TestNeedRefreshAppStatus(t *testing.T) {
assert.Equal(t, CompareWithRecent, compareWith)
})
t.Run("requesting refresh with delay gives correct compression level", func(t *testing.T) {
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)
// use a one-off controller so other tests don't have a manual refresh request
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
// refresh app with a non-nil delay
// use a zero-second delay to exercise the AddAfter logic without waiting in the test
delay := time.Duration(0)
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), &delay)
ctrl.processAppComparisonTypeQueueItem()
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithRecent, compareWith)
})
t.Run("refresh application which status is not reconciled using latest commit", func(t *testing.T) {
app := app.DeepCopy()
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
@@ -2168,7 +2138,7 @@ func TestHelmValuesObjectHasReplaceStrategy(t *testing.T) {
app,
appModified)
require.NoError(t, err)
assert.JSONEq(t, `{"status":{"sync":{"comparedTo":{"source":{"helm":{"valuesObject":{"key":["value-modified1"]}}}}}}}`, string(patch))
assert.Equal(t, `{"status":{"sync":{"comparedTo":{"source":{"helm":{"valuesObject":{"key":["value-modified1"]}}}}}}}`, string(patch))
}
func TestAppStatusIsReplaced(t *testing.T) {
@@ -2200,79 +2170,3 @@ func TestAppStatusIsReplaced(t *testing.T) {
require.True(t, has)
require.Nil(t, val)
}
func TestAlreadyAttemptSync(t *testing.T) {
app := newFakeApp()
t.Run("same manifest with sync result", func(t *testing.T) {
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
assert.True(t, attempted)
})
t.Run("different manifest with sync result", func(t *testing.T) {
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, true)
assert.False(t, attempted)
})
}
func assertDurationAround(t *testing.T, expected time.Duration, actual time.Duration) {
delta := time.Second / 2
assert.GreaterOrEqual(t, expected, actual-delta)
assert.LessOrEqual(t, expected, actual+delta)
}
func TestSelfHealExponentialBackoff(t *testing.T) {
ctrl := newFakeController(&fakeData{}, nil)
ctrl.selfHealBackOff = &wait.Backoff{
Factor: 3,
Duration: 2 * time.Second,
Cap: 5 * time.Minute,
}
app := &v1alpha1.Application{
Status: v1alpha1.ApplicationStatus{
OperationState: &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
},
},
},
}
testCases := []struct {
attempts int64
finishedAt *metav1.Time
expectedDuration time.Duration
shouldSelfHeal bool
}{{
attempts: 0,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 0,
shouldSelfHeal: true,
}, {
attempts: 1,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 2 * time.Second,
shouldSelfHeal: false,
}, {
attempts: 2,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 6 * time.Second,
shouldSelfHeal: false,
}, {
attempts: 3,
finishedAt: nil,
expectedDuration: 18 * time.Second,
shouldSelfHeal: false,
}}
for i := range testCases {
tc := testCases[i]
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
app.Status.OperationState.FinishedAt = tc.finishedAt
ok, duration := ctrl.shouldSelfHeal(app)
require.Equal(t, ok, tc.shouldSelfHeal)
assertDurationAround(t, tc.expectedDuration, duration)
})
}
}

View File

@@ -197,7 +197,6 @@ type cacheSettings struct {
clusterSettings clustercache.Settings
appInstanceLabelKey string
trackingMethod appv1.TrackingMethod
installationID string
// resourceOverrides provides a list of ignored differences to ignore watched resource updates
resourceOverrides map[string]appv1.ResourceOverride
@@ -226,10 +225,6 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
if err != nil {
return nil, err
}
installationID, err := c.settingsMgr.GetInstallationID()
if err != nil {
return nil, err
}
resourceUpdatesOverrides, err := c.settingsMgr.GetIgnoreResourceUpdatesOverrides()
if err != nil {
return nil, err
@@ -251,7 +246,7 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
ResourcesFilter: resourcesFilter,
}
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), installationID, resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
}
func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
@@ -528,7 +523,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
res.Health, _ = health.GetResourceHealth(un, cacheSettings.clusterSettings.ResourceHealthOverride)
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod, cacheSettings.installationID)
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod)
if isRoot && appName != "" {
res.AppName = appName
}

View File

@@ -278,32 +278,6 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
}
func isPodInitializedConditionTrue(status *v1.PodStatus) bool {
for _, condition := range status.Conditions {
if condition.Type != v1.PodInitialized {
continue
}
return condition.Status == v1.ConditionTrue
}
return false
}
func isRestartableInitContainer(initContainer *v1.Container) bool {
if initContainer == nil {
return false
}
if initContainer.RestartPolicy == nil {
return false
}
return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}
func isPodPhaseTerminal(phase v1.PodPhase) bool {
return phase == v1.PodFailed || phase == v1.PodSucceeded
}
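These helpers back the sidecar-aware container counting removed in this hunk. A self-contained sketch of the restartable-init-container check against the corev1 types (native sidecars, Kubernetes 1.28+):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// A native sidecar is an init container whose restartPolicy is Always;
// ordinary init containers leave the pointer nil.
func isRestartableInitContainer(c *v1.Container) bool {
	return c != nil && c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways
}

func main() {
	always := v1.ContainerRestartPolicyAlways
	sidecar := v1.Container{Name: "logshipper", RestartPolicy: &always}
	plain := v1.Container{Name: "init-db"}
	fmt.Println(isRestartableInitContainer(&sidecar), isRestartableInitContainer(&plain)) // true false
}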
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
pod := v1.Pod{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
@@ -314,8 +288,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
totalContainers := len(pod.Spec.Containers)
readyContainers := 0
podPhase := pod.Status.Phase
reason := string(podPhase)
reason := string(pod.Status.Phase)
if pod.Status.Reason != "" {
reason = pod.Status.Reason
}
@@ -333,21 +306,6 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
res.Images = append(res.Images, image)
}
// If the Pod carries {type:PodScheduled, reason:SchedulingGated}, set reason to 'SchedulingGated'.
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
reason = v1.PodReasonSchedulingGated
}
}
initContainers := make(map[string]*v1.Container)
for i := range pod.Spec.InitContainers {
initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
if isRestartableInitContainer(&pod.Spec.InitContainers[i]) {
totalContainers++
}
}
initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
@@ -355,12 +313,6 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
continue
case isRestartableInitContainer(initContainers[container.Name]) &&
container.Started != nil && *container.Started:
if container.Ready {
readyContainers++
}
continue
case container.State.Terminated != nil:
// initialization is failed
if len(container.State.Terminated.Reason) == 0 {
@@ -382,7 +334,8 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
}
break
}
if !initializing || isPodInitializedConditionTrue(&pod.Status) {
if !initializing {
restarts = 0
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
@@ -417,9 +370,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
// and https://github.com/kubernetes/kubernetes/issues/90358#issuecomment-617859364
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
reason = "Unknown"
// If the pod is being deleted and the pod phase is not succeeded or failed, set the reason to "Terminating".
// See https://github.com/kubernetes/kubectl/issues/1595#issuecomment-2080001023
} else if pod.DeletionTimestamp != nil && !isPodPhaseTerminal(podPhase) {
} else if pod.DeletionTimestamp != nil {
reason = "Terminating"
}

View File

@@ -285,552 +285,6 @@ func TestGetPodInfo(t *testing.T) {
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
}
func TestGetPodWithInitialContainerInfo(t *testing.T) {
pod := strToUnstructured(`
apiVersion: "v1"
kind: "Pod"
metadata:
labels:
app: "app-with-initial-container"
name: "app-with-initial-container-5f46976fdb-vd6rv"
namespace: "default"
ownerReferences:
- apiVersion: "apps/v1"
kind: "ReplicaSet"
name: "app-with-initial-container-5f46976fdb"
spec:
containers:
- image: "alpine:latest"
imagePullPolicy: "Always"
name: "app-with-initial-container"
initContainers:
- image: "alpine:latest"
imagePullPolicy: "Always"
name: "app-with-initial-container-logshipper"
nodeName: "minikube"
status:
containerStatuses:
- image: "alpine:latest"
name: "app-with-initial-container"
ready: true
restartCount: 0
started: true
state:
running:
startedAt: "2024-10-08T08:44:25Z"
initContainerStatuses:
- image: "alpine:latest"
name: "app-with-initial-container-logshipper"
ready: true
restartCount: 0
started: false
state:
terminated:
exitCode: 0
reason: "Completed"
phase: "Running"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/1"},
}, info.Info)
}
func TestGetPodInfoWithSidecar(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
labels:
app: app-with-sidecar
name: app-with-sidecar-6664cc788c-lqlrp
namespace: default
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: app-with-sidecar-6664cc788c
spec:
containers:
- image: 'docker.m.daocloud.io/library/alpine:latest'
imagePullPolicy: Always
name: app-with-sidecar
initContainers:
- image: 'docker.m.daocloud.io/library/alpine:latest'
imagePullPolicy: Always
name: logshipper
restartPolicy: Always
nodeName: minikube
status:
containerStatuses:
- image: 'docker.m.daocloud.io/library/alpine:latest'
name: app-with-sidecar
ready: true
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-08T08:39:43Z'
initContainerStatuses:
- image: 'docker.m.daocloud.io/library/alpine:latest'
name: logshipper
ready: true
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-08T08:39:40Z'
phase: Running
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "2/2"},
}, info.Info)
}
func TestGetPodInfoWithInitialContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
generateName: myapp-long-exist-56b7d8794d-
labels:
app: myapp-long-exist
name: myapp-long-exist-56b7d8794d-pbgrd
namespace: linghao
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: myapp-long-exist-56b7d8794d
spec:
containers:
- image: alpine:latest
imagePullPolicy: Always
name: myapp-long-exist
initContainers:
- image: alpine:latest
imagePullPolicy: Always
name: myapp-long-exist-logshipper
nodeName: minikube
status:
containerStatuses:
- image: alpine:latest
name: myapp-long-exist
ready: false
restartCount: 0
started: false
state:
waiting:
reason: PodInitializing
initContainerStatuses:
- image: alpine:latest
name: myapp-long-exist-logshipper
ready: false
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-09T08:03:45Z'
phase: Pending
startTime: '2024-10-09T08:02:39Z'
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod has 2 restartable init containers, the first one running but not started.
func TestGetPodInfoWithRestartableInitContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test1
spec:
initContainers:
- name: restartable-init-1
restartPolicy: Always
- name: restartable-init-2
restartPolicy: Always
containers:
- name: container
nodeName: minikube
status:
phase: Pending
initContainerStatuses:
- name: restartable-init-1
ready: false
restartCount: 3
state:
running: {}
started: false
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
- name: restartable-init-2
ready: false
state:
waiting: {}
started: false
containerStatuses:
- ready: false
restartCount: 0
state:
waiting: {}
conditions:
- type: ContainersReady
status: "False"
- type: Initialized
status: "False"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod has 2 restartable init containers, the first one started and the second one running but not started.
func TestGetPodInfoWithPartiallyStartedInitContainers(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test1
spec:
initContainers:
- name: restartable-init-1
restartPolicy: Always
- name: restartable-init-2
restartPolicy: Always
containers:
- name: container
nodeName: minikube
status:
phase: Pending
initContainerStatuses:
- name: restartable-init-1
ready: false
restartCount: 3
state:
running: {}
started: true
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
- name: restartable-init-2
ready: false
state:
running: {}
started: false
containerStatuses:
- ready: false
restartCount: 0
state:
waiting: {}
conditions:
- type: ContainersReady
status: "False"
- type: Initialized
status: "False"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:1/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod has 2 restartable init containers started and 1 container running
func TestGetPodInfoWithStartedInitContainers(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test2
spec:
initContainers:
- name: restartable-init-1
restartPolicy: Always
- name: restartable-init-2
restartPolicy: Always
containers:
- name: container
nodeName: minikube
status:
phase: Running
initContainerStatuses:
- name: restartable-init-1
ready: false
restartCount: 3
state:
running: {}
started: true
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
- name: restartable-init-2
ready: false
state:
running: {}
started: true
containerStatuses:
- ready: true
restartCount: 4
state:
running: {}
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
conditions:
- type: ContainersReady
status: "False"
- type: Initialized
status: "True"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/3"},
{Name: "Restart Count", Value: "7"},
}, info.Info)
}
// Test pod has 1 init container restarting and 1 container not running
func TestGetPodInfoWithNormalInitContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test7
spec:
initContainers:
- name: init-container
containers:
- name: main-container
nodeName: minikube
status:
phase: podPhase
initContainerStatuses:
- ready: false
restartCount: 3
state:
running: {}
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with the actual time
containerStatuses:
- ready: false
restartCount: 0
state:
waiting: {}
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod condition succeed
func TestPodConditionSucceeded(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test8
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Succeeded
containerStatuses:
- ready: false
restartCount: 0
state:
terminated:
reason: Completed
exitCode: 0
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition failed
func TestPodConditionFailed(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test9
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Failed
containerStatuses:
- ready: false
restartCount: 0
state:
terminated:
reason: Error
exitCode: 1
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Error"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition succeed with deletion
func TestPodConditionSucceededWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test10
deletionTimestamp: "2023-10-01T00:00:00Z"
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Succeeded
containerStatuses:
- ready: false
restartCount: 0
state:
terminated:
reason: Completed
exitCode: 0
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition running with deletion
func TestPodConditionRunningWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test11
deletionTimestamp: "2023-10-01T00:00:00Z"
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Running
containerStatuses:
- ready: false
restartCount: 0
state:
running: {}
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition pending with deletion
func TestPodConditionPendingWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test12
deletionTimestamp: "2023-10-01T00:00:00Z"
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Pending
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test PodScheduled condition with reason SchedulingGated
func TestPodScheduledWithSchedulingGated(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test13
spec:
nodeName: minikube
containers:
- name: container1
- name: container2
status:
phase: podPhase
conditions:
- type: PodScheduled
status: "False"
reason: SchedulingGated
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "SchedulingGated"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/2"},
}, info.Info)
}
func TestGetNodeInfo(t *testing.T) {
node := strToUnstructured(`
apiVersion: v1

View File

@@ -51,7 +51,7 @@ func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Applicat
revisions = append(revisions, src.TargetRevision)
}
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
targets, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
if err != nil {
return false, err
}

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"net/http"
"os"
"slices"
"strconv"
"time"
@@ -55,8 +54,7 @@ const (
var (
descAppDefaultLabels = []string{"namespace", "name", "project"}
descAppLabels *prometheus.Desc
descAppConditions *prometheus.Desc
descAppLabels *prometheus.Desc
descAppInfo = prometheus.NewDesc(
"argocd_app_info",
@@ -64,7 +62,6 @@ var (
append(descAppDefaultLabels, "autosync_enabled", "repo", "dest_server", "dest_namespace", "sync_status", "health_status", "operation"),
nil,
)
// Deprecated
descAppCreated = prometheus.NewDesc(
"argocd_app_created_time",
@@ -147,7 +144,7 @@ var (
)
// NewMetricsServer returns a new prometheus server which collects application metrics
func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, healthCheck func(r *http.Request) error, appLabels []string, appConditions []string) (*MetricsServer, error) {
func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, healthCheck func(r *http.Request) error, appLabels []string) (*MetricsServer, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, err
@@ -163,17 +160,8 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
)
}
if len(appConditions) > 0 {
descAppConditions = prometheus.NewDesc(
"argocd_app_condition",
"Report application conditions.",
append(descAppDefaultLabels, "condition"),
nil,
)
}
mux := http.NewServeMux()
registry := NewAppRegistry(appLister, appFilter, appLabels, appConditions)
registry := NewAppRegistry(appLister, appFilter, appLabels)
mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{
// contains app controller specific metrics
@@ -305,26 +293,24 @@ func (m *MetricsServer) SetExpiration(cacheExpiration time.Duration) error {
}
type appCollector struct {
store applister.ApplicationLister
appFilter func(obj interface{}) bool
appLabels []string
appConditions []string
store applister.ApplicationLister
appFilter func(obj interface{}) bool
appLabels []string
}
// NewAppCollector returns a prometheus collector for application metrics
func NewAppCollector(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string, appConditions []string) prometheus.Collector {
func NewAppCollector(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string) prometheus.Collector {
return &appCollector{
store: appLister,
appFilter: appFilter,
appLabels: appLabels,
appConditions: appConditions,
store: appLister,
appFilter: appFilter,
appLabels: appLabels,
}
}
// NewAppRegistry creates a new prometheus registry that collects applications
func NewAppRegistry(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string, appConditions []string) *prometheus.Registry {
func NewAppRegistry(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string) *prometheus.Registry {
registry := prometheus.NewRegistry()
registry.MustRegister(NewAppCollector(appLister, appFilter, appLabels, appConditions))
registry.MustRegister(NewAppCollector(appLister, appFilter, appLabels))
return registry
}
@@ -333,9 +319,6 @@ func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
if len(c.appLabels) > 0 {
ch <- descAppLabels
}
if len(c.appConditions) > 0 {
ch <- descAppConditions
}
ch <- descAppInfo
ch <- descAppSyncStatusCode
ch <- descAppHealthStatus
@@ -400,19 +383,6 @@ func (c *appCollector) collectApps(ch chan<- prometheus.Metric, app *argoappv1.A
addGauge(descAppLabels, 1, labelValues...)
}
if len(c.appConditions) > 0 {
conditionCount := make(map[string]int)
for _, condition := range app.Status.Conditions {
if slices.Contains(c.appConditions, condition.Type) {
conditionCount[condition.Type]++
}
}
for conditionType, count := range conditionCount {
addGauge(descAppConditions, float64(count), conditionType)
}
}
// Deprecated controller metrics
if os.Getenv(EnvVarLegacyControllerMetrics) == "true" {
addGauge(descAppCreated, float64(app.CreationTimestamp.Unix()))
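The condition metric dropped in this hunk counts only the condition types the operator opted into. A minimal sketch of that filter-and-count step using the Go 1.21 slices package (ApplicationCondition is reduced to its Type string here):

package main

import (
	"fmt"
	"slices"
)

// countTrackedConditions tallies how often each tracked condition type occurs.
func countTrackedConditions(conditionTypes, tracked []string) map[string]int {
	counts := make(map[string]int)
	for _, t := range conditionTypes {
		if slices.Contains(tracked, t) {
			counts[t]++
		}
	}
	return counts
}

func main() {
	conditions := []string{"OrphanedResourceWarning", "ExcludedResourceWarning", "ExcludedResourceWarning"}
	fmt.Println(countTrackedConditions(conditions, []string{"ExcludedResourceWarning"})) // map[ExcludedResourceWarning:2]
}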

View File

@@ -116,41 +116,6 @@ status:
status: Degraded
`
const fakeApp4 = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: my-app-4
namespace: argocd
labels:
team-name: my-team
team-bu: bu-id
argoproj.io/cluster: test-cluster
spec:
destination:
namespace: dummy-namespace
server: https://localhost:6443
project: important-project
source:
path: some/path
repoURL: https://github.com/argoproj/argocd-example-apps.git
status:
sync:
status: OutOfSync
health:
status: Degraded
conditions:
- lastTransitionTime: "2024-08-07T12:25:40Z"
message: Application has 1 orphaned resources
type: OrphanedResourceWarning
- lastTransitionTime: "2024-08-07T12:25:40Z"
message: Resource Pod standalone-pod is excluded in the settings
type: ExcludedResourceWarning
- lastTransitionTime: "2024-08-07T12:25:40Z"
message: Resource Endpoint raw-endpoint is excluded in the settings
type: ExcludedResourceWarning
`
const fakeDefaultApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
@@ -214,7 +179,7 @@ func newFakeLister(fakeAppYAMLs ...string) (context.CancelFunc, applister.Applic
func testApp(t *testing.T, fakeAppYAMLs []string, expectedResponse string) {
t.Helper()
testMetricServer(t, fakeAppYAMLs, expectedResponse, []string{}, []string{})
testMetricServer(t, fakeAppYAMLs, expectedResponse, []string{})
}
type fakeClusterInfo struct {
@@ -229,17 +194,15 @@ type TestMetricServerConfig struct {
FakeAppYAMLs []string
ExpectedResponse string
AppLabels []string
AppConditions []string
ClustersInfo []gitopsCache.ClusterInfo
}
func testMetricServer(t *testing.T, fakeAppYAMLs []string, expectedResponse string, appLabels []string, appConditions []string) {
func testMetricServer(t *testing.T, fakeAppYAMLs []string, expectedResponse string, appLabels []string) {
t.Helper()
cfg := TestMetricServerConfig{
FakeAppYAMLs: fakeAppYAMLs,
ExpectedResponse: expectedResponse,
AppLabels: appLabels,
AppConditions: appConditions,
ClustersInfo: []gitopsCache.ClusterInfo{},
}
runTest(t, cfg)
@@ -249,7 +212,7 @@ func runTest(t *testing.T, cfg TestMetricServerConfig) {
t.Helper()
cancel, appLister := newFakeLister(cfg.FakeAppYAMLs...)
defer cancel()
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, cfg.AppLabels, cfg.AppConditions)
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, cfg.AppLabels)
require.NoError(t, err)
if len(cfg.ClustersInfo) > 0 {
@@ -340,61 +303,7 @@ argocd_app_labels{label_non_existing="",name="my-app-3",namespace="argocd",proje
for _, c := range cases {
c := c
t.Run(c.description, func(t *testing.T) {
testMetricServer(t, c.applications, c.responseContains, c.metricLabels, []string{})
})
}
}
func TestMetricConditions(t *testing.T) {
type testCases struct {
testCombination
description string
metricConditions []string
}
cases := []testCases{
{
description: "metric will only output OrphanedResourceWarning",
metricConditions: []string{"OrphanedResourceWarning"},
testCombination: testCombination{
applications: []string{fakeApp4},
responseContains: `
# HELP argocd_app_condition Report application conditions.
# TYPE argocd_app_condition gauge
argocd_app_condition{condition="OrphanedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 1
`,
},
},
{
description: "metric will only output ExcludedResourceWarning",
metricConditions: []string{"ExcludedResourceWarning"},
testCombination: testCombination{
applications: []string{fakeApp4},
responseContains: `
# HELP argocd_app_condition Report application conditions.
# TYPE argocd_app_condition gauge
argocd_app_condition{condition="ExcludedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 2
`,
},
},
{
description: "metric will only output both OrphanedResourceWarning and ExcludedResourceWarning",
metricConditions: []string{"ExcludedResourceWarning", "OrphanedResourceWarning"},
testCombination: testCombination{
applications: []string{fakeApp4},
responseContains: `
# HELP argocd_app_condition Report application conditions.
# TYPE argocd_app_condition gauge
argocd_app_condition{condition="OrphanedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 1
argocd_app_condition{condition="ExcludedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 2
`,
},
},
}
for _, c := range cases {
c := c
t.Run(c.description, func(t *testing.T) {
testMetricServer(t, c.applications, c.responseContains, []string{}, c.metricConditions)
testMetricServer(t, c.applications, c.responseContains, c.metricLabels)
})
}
}
@@ -426,7 +335,7 @@ argocd_app_sync_status{name="my-app",namespace="argocd",project="important-proje
func TestMetricsSyncCounter(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
require.NoError(t, err)
appSyncTotal := `
@@ -478,7 +387,7 @@ func assertMetricsNotPrinted(t *testing.T, expectedLines, body string) {
func TestReconcileMetrics(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
require.NoError(t, err)
appReconcileMetrics := `
@@ -511,7 +420,7 @@ argocd_app_reconcile_count{dest_server="https://localhost:6443",namespace="argoc
func TestMetricsReset(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
require.NoError(t, err)
appSyncTotal := `
@@ -548,7 +457,7 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa
func TestWorkqueueMetrics(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
require.NoError(t, err)
expectedMetrics := `
@@ -583,7 +492,7 @@ workqueue_unfinished_work_seconds{controller="test",name="test"}
func TestGoMetrics(t *testing.T) {
cancel, appLister := newFakeLister()
defer cancel()
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
require.NoError(t, err)
expectedMetrics := `

View File

@@ -70,7 +70,7 @@ type managedResource struct {
type AppStateManager interface {
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error)
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error)
}
// comparisonResult holds the state of an application after the reconciliation
@@ -88,7 +88,6 @@ type comparisonResult struct {
timings map[string]time.Duration
diffResultList *diff.DiffResultList
hasPostDeleteHooks bool
revisionUpdated bool
}
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
@@ -124,56 +123,51 @@ type appStateManager struct {
// task to the repo-server. It returns the list of generated manifests as unstructured
// objects. It also returns the full response from all calls to the repo server as the
// second argument.
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {
ts := stats.NewTimingStats()
helmRepos, err := m.db.ListHelmRepositories(context.Background())
if err != nil {
return nil, nil, false, fmt.Errorf("failed to list Helm repositories: %w", err)
return nil, nil, fmt.Errorf("failed to list Helm repositories: %w", err)
}
permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)
return nil, nil, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)
}
ts.AddCheckpoint("repo_ms")
helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background())
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get Helm credentials: %w", err)
return nil, nil, fmt.Errorf("failed to get Helm credentials: %w", err)
}
permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)
return nil, nil, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)
}
enabledSourceTypes, err := m.settingsMgr.GetEnabledSourceTypes()
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get enabled source types: %w", err)
return nil, nil, fmt.Errorf("failed to get enabled source types: %w", err)
}
ts.AddCheckpoint("plugins_ms")
kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings()
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get Kustomize settings: %w", err)
return nil, nil, fmt.Errorf("failed to get Kustomize settings: %w", err)
}
helmOptions, err := m.settingsMgr.GetHelmSettings()
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get Helm settings: %w", err)
}
installationID, err := m.settingsMgr.GetInstallationID()
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get installation ID: %w", err)
return nil, nil, fmt.Errorf("failed to get Helm settings: %w", err)
}
ts.AddCheckpoint("build_options_ms")
serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
return nil, nil, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
}
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
if err != nil {
return nil, nil, false, fmt.Errorf("failed to connect to repo server: %w", err)
return nil, nil, fmt.Errorf("failed to connect to repo server: %w", err)
}
defer io.Close(conn)
@@ -185,26 +179,20 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
// revisions for the rollback
refSources, err := argo.GetRefSources(context.Background(), sources, app.Spec.Project, m.db.GetRepository, revisions, rollback)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get ref sources: %w", err)
return nil, nil, fmt.Errorf("failed to get ref sources: %w", err)
}
revisionUpdated := false
atLeastOneRevisionIsNotPossibleToBeUpdated := false
keyManifestGenerateAnnotationVal, keyManifestGenerateAnnotationExists := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
for i, source := range sources {
if len(revisions) < len(sources) || revisions[i] == "" {
revisions[i] = source.TargetRevision
}
repo, err := m.db.GetRepository(context.Background(), source.RepoURL, proj.Name)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
return nil, nil, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
}
kustomizeOptions, err := kustomizeSettings.GetOptions(source)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
return nil, nil, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
}
syncedRevision := app.Status.Sync.Revision
@@ -216,15 +204,13 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
}
}
revision := revisions[i]
if !source.IsHelm() && syncedRevision != "" && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" {
val, ok := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
if !source.IsHelm() && syncedRevision != "" && ok && val != "" {
// Compare the paths from the manifest-generate-paths annotation so manifests are only regenerated when those paths changed.
updateRevisionResult, err := repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
_, err = repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
Repo: repo,
Revision: revision,
Revision: revisions[i],
SyncedRevision: syncedRevision,
NoRevisionCache: noRevisionCache,
Paths: path.GetAppRefreshPaths(app),
AppLabelKey: appLabelKey,
AppName: app.InstanceName(m.namespace),
@@ -235,29 +221,17 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
RefSources: refSources,
HasMultipleSources: app.Spec.HasMultipleSources(),
InstallationID: installationID,
})
if err != nil {
return nil, nil, false, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
return nil, nil, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
}
if updateRevisionResult.Changes {
revisionUpdated = true
}
// Manifest generation should use the same revision as updateRevisionForPaths, because the HEAD revision may differ between the two calls
if updateRevisionResult.Revision != "" {
revision = updateRevisionResult.Revision
}
} else {
// Record that at least one revision could not be updated via the paths check; revisionUpdated is forced to true below.
atLeastOneRevisionIsNotPossibleToBeUpdated = true
}
log.Debugf("Generating Manifest for source %s revision %s", source, revision)
log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
Repo: repo,
Repos: permittedHelmRepos,
Revision: revision,
Revision: revisions[i],
NoCache: noCache,
NoRevisionCache: noRevisionCache,
AppLabelKey: appLabelKey,
@@ -276,15 +250,14 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
RefSources: refSources,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
InstallationID: installationID,
})
if err != nil {
return nil, nil, false, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
return nil, nil, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
}
targetObj, err := unmarshalManifests(manifestInfo.Manifests)
if err != nil {
return nil, nil, false, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)
return nil, nil, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)
}
targetObjs = append(targetObjs, targetObj...)
manifestInfos = append(manifestInfos, manifestInfo)
@@ -297,13 +270,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
}
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
logCtx.Info("GetRepoObjs stats")
// If the annotation does not exist, we should always execute self-heal when the manifests changed
if atLeastOneRevisionIsNotPossibleToBeUpdated {
revisionUpdated = true
}
return targetObjs, manifestInfos, revisionUpdated, nil
return targetObjs, manifestInfos, nil
}
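For context on the annotation gate above, here is a minimal sketch (the path value is assumed, not taken from this change) of how an Application opts in to the `argocd.argoproj.io/manifest-generate-paths` check that drives the `UpdateRevisionForPaths` call:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	app := v1alpha1.Application{}
	// "." scopes revision updates to changes under the source directory;
	// without this annotation the paths-based check is skipped entirely.
	app.Annotations = map[string]string{
		v1alpha1.AnnotationKeyManifestGeneratePaths: ".",
	}

	val, ok := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
	fmt.Println(ok, val) // true "."
}
```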
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
@@ -360,24 +327,20 @@ func DeduplicateTargetObjects(
// getComparisonSettings will return the system level settings related to the
// diff/normalization process.
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, string, error) {
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) {
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
if err != nil {
return "", nil, nil, "", err
return "", nil, nil, err
}
appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
if err != nil {
return "", nil, nil, "", err
return "", nil, nil, err
}
resFilter, err := m.settingsMgr.GetResourcesFilter()
if err != nil {
return "", nil, nil, "", err
return "", nil, nil, err
}
installationID, err := m.settingsMgr.GetInstallationID()
if err != nil {
return "", nil, nil, "", err
}
return appLabelKey, resourceOverrides, resFilter, installationID, nil
return appLabelKey, resourceOverrides, resFilter, nil
}
// verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
@@ -428,7 +391,7 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error) {
ts := stats.NewTimingStats()
appLabelKey, resourceOverrides, resFilter, installationID, err := m.getComparisonSettings()
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
ts.AddCheckpoint("settings_ms")
@@ -457,7 +420,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
// When signature keys are defined in the project spec, we need to verify the signature on the Git revision
verifySignature := false
if len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled() {
if project.Spec.SignatureKeys != nil && len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled() {
verifySignature = true
}
@@ -474,8 +437,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
var manifestInfos []*apiclient.ManifestResponse
targetNsExists := false
var revisionUpdated bool
if len(localManifests) == 0 {
// If the length of revisions is not same as the length of sources,
// we take the revisions from the sources directly for all the sources.
@@ -486,7 +447,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
}
}
targetObjs, manifestInfos, revisionUpdated, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback)
targetObjs, manifestInfos, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback)
if err != nil {
targetObjs = make([]*unstructured.Unstructured, 0)
msg := fmt.Sprintf("Failed to load target state: %s", err.Error())
@@ -596,7 +557,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
for _, liveObj := range liveObjByKey {
if liveObj != nil {
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod, installationID)
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod)
if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {
fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")
conditions = append(conditions, v1alpha1.ApplicationCondition{
@@ -735,7 +696,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
}
gvk := obj.GroupVersionKind()
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod, installationID)
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
resState := v1alpha1.ResourceStatus{
Namespace: obj.GetNamespace(),
@@ -877,7 +838,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
diffConfig: diffConfig,
diffResultList: diffResults,
hasPostDeleteHooks: hasPostDeleteHooks,
revisionUpdated: revisionUpdated,
}
if hasMultipleSources {
@@ -935,7 +895,9 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
return false
}
if !specEqualsCompareTo(app.Spec, app.Status.Sync.ComparedTo) {
currentSpec := app.BuildComparedToStatus()
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
if specChanged {
log.WithField("useDiffCache", "false").Debug("specChanged")
return false
}
@@ -944,29 +906,6 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
return true
}
// specEqualsCompareTo compares the application spec to the comparedTo status. It normalizes the destination to match
// the comparedTo destination before comparing. It does not mutate the original spec or comparedTo.
func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, comparedTo v1alpha1.ComparedTo) bool {
// Make a copy to be sure we don't mutate the original.
specCopy := spec.DeepCopy()
currentSpec := specCopy.BuildComparedToStatus()
// The spec might have been augmented to include both server and name, so change it to match the comparedTo before
// comparing.
if comparedTo.Destination.Server == "" {
currentSpec.Destination.Server = ""
}
if comparedTo.Destination.Name == "" {
currentSpec.Destination.Name = ""
}
// Set IsServerInferred to false on both, because that field is not important for comparison.
comparedTo.Destination.SetIsServerInferred(false)
currentSpec.Destination.SetIsServerInferred(false)
return reflect.DeepEqual(comparedTo, currentSpec)
}
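A minimal test-style sketch of the normalization `specEqualsCompareTo` performs, assuming it sits in the controller package next to the existing tests (the destination values are illustrative):

```go
package controller

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func TestSpecEqualsCompareToNormalizesInferredServer(t *testing.T) {
	spec := v1alpha1.ApplicationSpec{
		Destination: v1alpha1.ApplicationDestination{Name: "in-cluster"},
	}
	// Simulate the controller augmenting the destination with an inferred server.
	spec.Destination.SetInferredServer("https://kubernetes.default.svc")

	comparedTo := v1alpha1.ComparedTo{
		Destination: v1alpha1.ApplicationDestination{Name: "in-cluster"},
	}

	// A raw reflect.DeepEqual would treat the inferred server as a difference
	// and miss the diff cache; specEqualsCompareTo strips it before comparing.
	assert.True(t, specEqualsCompareTo(spec, comparedTo))
}
```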
func (m *appStateManager) persistRevisionHistory(
app *v1alpha1.Application,
revision string,
@@ -1061,7 +1000,7 @@ func NewAppStateManager(
// group and kind) match the properties of the live object, or if the tracking method
// used does not provide the required properties for matching.
// Reference: https://github.com/argoproj/argo-cd/issues/8683
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, installationID string) bool {
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
if live == nil {
return true
}
@@ -1094,7 +1033,7 @@ func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstruc
// to match the properties from the live object. Cluster scoped objects
// carry the app's destination namespace in the tracking annotation,
// but are unique in GVK + name combination.
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod, installationID)
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
if appInstance != nil {
return isSelfReferencedObj(live, *appInstance)
}

View File

@@ -1372,8 +1372,8 @@ func TestIsLiveResourceManaged(t *testing.T) {
configObj := managedObj.DeepCopy()
// then
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
t.Run("will return true if tracked with label", func(t *testing.T) {
// given
@@ -1381,43 +1381,43 @@ func TestIsLiveResourceManaged(t *testing.T) {
configObj := managedObjWithLabel.DeepCopy()
// then
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
})
t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel, ""))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
})
t.Run("will return true if live is nil", func(t *testing.T) {
t.Parallel()
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) {
@@ -1427,13 +1427,11 @@ func TestIsLiveResourceManaged(t *testing.T) {
delete(config.GetAnnotations(), common.AnnotationKeyAppInstance)
// then
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
})
}
func TestUseDiffCache(t *testing.T) {
t.Parallel()
type fixture struct {
testName string
noCache bool
@@ -1529,10 +1527,6 @@ func TestUseDiffCache(t *testing.T) {
t.Fatalf("error merging app: %s", err)
}
}
if app.Spec.Destination.Name != "" && app.Spec.Destination.Server != "" {
// Simulate the controller's process for populating both of these fields.
app.Spec.Destination.SetInferredServer(app.Spec.Destination.Server)
}
return app
}
@@ -1698,44 +1692,6 @@ func TestUseDiffCache(t *testing.T) {
expectedUseCache: false,
serverSideDiff: false,
},
{
// There are code paths that modify the ApplicationSpec and augment the destination field with both the
// destination server and name. Since both fields are populated in the app spec but not in the comparedTo,
// we need to make sure we correctly compare the fields and don't miss the cache.
testName: "will return true if the app spec destination contains both server and name, but otherwise matches comparedTo",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, &argoappv1.Application{
Spec: argoappv1.ApplicationSpec{
Destination: argoappv1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Name: "httpbin",
Namespace: "httpbin",
},
},
Status: argoappv1.ApplicationStatus{
Resources: []argoappv1.ResourceStatus{},
Sync: argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Destination: argoappv1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "httpbin",
},
},
Revision: "rev1",
},
ReconciledAt: &metav1.Time{
Time: time.Now().Add(-time.Hour),
},
},
}),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: true,
serverSideDiff: true,
},
}
for _, tc := range cases {
@@ -1754,49 +1710,3 @@ func TestUseDiffCache(t *testing.T) {
})
}
}
func TestCompareAppStateDefaultRevisionUpdated(t *testing.T) {
app := newFakeApp()
data := fakeData{
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
require.NoError(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.True(t, compRes.revisionUpdated)
}
func TestCompareAppStateRevisionUpdatedWithHelmSource(t *testing.T) {
app := newFakeMultiSourceApp()
data := fakeData{
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
}
ctrl := newFakeController(&data, nil)
sources := make([]argoappv1.ApplicationSource, 0)
sources = append(sources, app.Spec.GetSource())
revisions := make([]string, 0)
revisions = append(revisions, "")
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
require.NoError(t, err)
assert.NotNil(t, compRes)
assert.NotNil(t, compRes.syncStatus)
assert.True(t, compRes.revisionUpdated)
}

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"os"
"strconv"
"strings"
"sync/atomic"
"time"
@@ -24,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/managedfields"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/util/openapi"
"github.com/argoproj/argo-cd/v2/controller/metrics"
@@ -32,7 +30,6 @@ import (
listersv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/diff"
"github.com/argoproj/argo-cd/v2/util/glob"
logutils "github.com/argoproj/argo-cd/v2/util/log"
"github.com/argoproj/argo-cd/v2/util/lua"
"github.com/argoproj/argo-cd/v2/util/rand"
@@ -44,10 +41,6 @@ const (
// EnvVarSyncWaveDelay is an environment variable which controls the delay in seconds between
// each sync-wave
EnvVarSyncWaveDelay = "ARGOCD_SYNC_WAVE_DELAY"
// serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
// in a DefaultServiceAccount configured for a DestinationServiceAccount
serviceAccountDisallowedCharSet = "!*[]{}\\/"
)
func (m *appStateManager) getOpenAPISchema(server string) (openapi.Resources, error) {
@@ -174,18 +167,12 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
state.Phase = common.OperationError
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
return
} else {
isBlocked, err := syncWindowPreventsSync(app, proj)
if isBlocked {
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
if state.Phase == common.OperationRunning {
state.Message = "Sync operation blocked by sync window"
if err != nil {
state.Message = fmt.Sprintf("%s: %v", state.Message, err)
}
}
return
} else if syncWindowPreventsSync(app, proj) {
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
if state.Phase == common.OperationRunning {
state.Message = "Sync operation blocked by sync window"
}
return
}
if !isMultiSourceRevision {
@@ -295,35 +282,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
log.Errorf("Could not get appInstanceLabelKey: %v", err)
return
}
installationID, err := m.settingsMgr.GetInstallationID()
if err != nil {
log.Errorf("Could not get installation ID: %v", err)
return
}
trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
impersonationEnabled, err := m.settingsMgr.IsImpersonationEnabled()
if err != nil {
log.Errorf("could not get impersonation feature flag: %v", err)
return
}
if impersonationEnabled {
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app)
if err != nil {
state.Phase = common.OperationError
state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
return
}
logEntry = logEntry.WithFields(log.Fields{"impersonationEnabled": "true", "serviceAccount": serviceAccountToImpersonate})
// set the impersonation headers.
rawConfig.Impersonate = rest.ImpersonationConfig{
UserName: serviceAccountToImpersonate,
}
restConfig.Impersonate = rest.ImpersonationConfig{
UserName: serviceAccountToImpersonate,
}
}
opts := []sync.SyncOpt{
sync.WithLogr(logutils.NewLogrusLogger(logEntry)),
sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)),
@@ -351,7 +311,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
return (len(syncOp.Resources) == 0 ||
isPostDeleteHook(target) ||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod, installationID)
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
}),
sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)),
sync.WithSyncWaveHook(delayBetweenSyncWaves),
@@ -568,52 +528,11 @@ func delayBetweenSyncWaves(phase common.SyncPhase, wave int, finalWave bool) err
return nil
}
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) (bool, error) {
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) bool {
window := proj.Spec.SyncWindows.Matches(app)
isManual := false
if app.Status.OperationState != nil {
isManual = !app.Status.OperationState.Operation.InitiatedBy.Automated
}
canSync, err := window.CanSync(isManual)
if err != nil {
// prevents sync because sync window has an error
return true, err
}
return !canSync, nil
}
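To make the blocking behavior concrete, a rough sketch (the schedule and duration are assumed, and the two-value CanSync signature matches the error-returning variant shown above) of a project deny window that would block an automated sync while active:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	proj := v1alpha1.AppProject{}
	// A nightly two-hour deny window that applies to every application.
	proj.Spec.SyncWindows = v1alpha1.SyncWindows{
		&v1alpha1.SyncWindow{
			Kind:         "deny",
			Schedule:     "0 22 * * *",
			Duration:     "2h",
			Applications: []string{"*"},
		},
	}

	window := proj.Spec.SyncWindows.Matches(&v1alpha1.Application{})
	// false = automated sync; while the window is active this reports false.
	canSync, err := window.CanSync(false)
	fmt.Println(canSync, err)
}
```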
// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
// spec.Destination.Namespace is optional. If not specified, use the Application's
// namespace
serviceAccountNamespace := application.Spec.Destination.Namespace
if serviceAccountNamespace == "" {
serviceAccountNamespace = application.Namespace
}
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
// if so, return the service account specified for that destination.
for _, item := range project.Spec.DestinationServiceAccounts {
dstServerMatched, err := glob.MatchWithError(item.Server, application.Spec.Destination.Server)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
}
dstNamespaceMatched, err := glob.MatchWithError(item.Namespace, application.Spec.Destination.Namespace)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
}
if dstServerMatched && dstNamespaceMatched {
if strings.Trim(item.DefaultServiceAccount, " ") == "" || strings.ContainsAny(item.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
return "", fmt.Errorf("default service account contains invalid chars '%s'", item.DefaultServiceAccount)
} else if strings.Contains(item.DefaultServiceAccount, ":") {
// service account is specified along with its namespace.
return fmt.Sprintf("system:serviceaccount:%s", item.DefaultServiceAccount), nil
} else {
// service account needs to be prefixed with a namespace
return fmt.Sprintf("system:serviceaccount:%s:%s", serviceAccountNamespace, item.DefaultServiceAccount), nil
}
}
}
// if no match is found in AppProject.Spec.DestinationServiceAccounts, fail rather than falling back to a default service account.
return "", fmt.Errorf("no matching service account found for destination server %s and namespace %s", application.Spec.Destination.Server, serviceAccountNamespace)
return !window.CanSync(isManual)
}
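For reference, a minimal client-go sketch (host and service account assumed) of what the impersonation wiring removed above amounts to: the derived `system:serviceaccount:<namespace>:<name>` string becomes the impersonated user on the rest config:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	cfg := &rest.Config{Host: "https://localhost:6443"}
	// Every API request issued through this config is made as the derived
	// service account instead of the controller's own identity.
	cfg.Impersonate = rest.ImpersonationConfig{
		UserName: "system:serviceaccount:guestbook:test-sa",
	}
	fmt.Println(cfg.Impersonate.UserName)
}
```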

View File

@@ -2,7 +2,6 @@ package controller
import (
"context"
"strconv"
"testing"
"github.com/argoproj/gitops-engine/pkg/sync"
@@ -10,7 +9,6 @@ import (
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -646,771 +644,6 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
})
}
func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
}
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
project := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd-ns",
Name: "testProj",
},
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: destinationServiceAccounts,
},
}
app := &v1alpha1.Application{
ObjectMeta: v1.ObjectMeta{
Namespace: applicationNamespace,
Name: "testApp",
},
Spec: v1alpha1.ApplicationSpec{
Project: "testProj",
Destination: v1alpha1.ApplicationDestination{
Server: destinationServerURL,
Namespace: destinationNamespace,
},
},
}
return &fixture{
project: project,
application: app,
}
}
t.Run("empty destination service accounts", func(t *testing.T) {
// given an application referring to a project with no destination service accounts
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
assert.Equal(t, expectedSA, sa)
// then, there should be an error saying no valid match was found
assert.EqualError(t, err, expectedErrMsg)
})
t.Run("exact match of destination namespace", func(t *testing.T) {
// given an application referring to a project with exactly one destination service account that matches the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts, one of which exactly matches the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook-test",
DefaultServiceAccount: "guestbook-test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts, several of which match the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-3",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be no error and it should use the first matching service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts whose glob patterns match the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "test*",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("no match among a valid list", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts, none of which match the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "test1",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "test2",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be an error saying no match was found
require.EqualError(t, err, expectedErrMsg)
assert.Equal(t, expectedSA, sa)
})
t.Run("app destination namespace is empty", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and an empty application destination namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa-2",
},
}
destinationNamespace := ""
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:argocd-ns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error, and the service account configured with the empty namespace should be used.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via catch all glob pattern", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts including a catch-all glob pattern
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns1",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the catch all service account should be returned
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via invalid glob pattern", func(t *testing.T) {
// given an application referring to a project with a destination service account that has an invalid glob pattern for the namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "e[[a*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
assert.Equal(t, expectedSA, sa)
})
t.Run("sa specified with a namespace", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts where the matching service account is specified with its namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "myns:test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:myns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
require.NoError(t, err)
})
}
func TestDeriveServiceAccountMatchingServers(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
}
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
project := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd-ns",
Name: "testProj",
},
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: destinationServiceAccounts,
},
}
app := &v1alpha1.Application{
ObjectMeta: v1.ObjectMeta{
Namespace: applicationNamespace,
Name: "testApp",
},
Spec: v1alpha1.ApplicationSpec{
Project: "testProj",
Destination: v1alpha1.ApplicationDestination{
Server: destinationServerURL,
Namespace: destinationNamespace,
},
},
}
return &fixture{
project: project,
application: app,
}
}
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and one exact match for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
{
Server: "https://abc.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-test-sa",
},
{
Server: "https://cde.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the right service account must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and multiple matches for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and first matching service account should be used
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts with both a matching glob pattern and an exact match
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "test*",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error, and the service account of the glob pattern, being the first match, should be returned.
require.NoError(t, err)
})
t.Run("no match among a valid list", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and no match
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://abc.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://cde.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://xyz.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
expectedErr := "no matching service account found for destination server https://xyz.svc.local and namespace testns"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, an error with an appropriate message must be returned
require.EqualError(t, err, expectedErr)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via catch all glob pattern", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and a matching catch-all glob pattern
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns1",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "*",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://localhost:6443"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the service account of the glob pattern match must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via invalid glob pattern", func(t *testing.T) {
// given an application referring to a project with a destination service account that has an invalid glob pattern for the server
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "e[[a*",
Namespace: "test-ns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination server")
assert.Equal(t, expectedSA, sa)
})
t.Run("sa specified with a namespace", func(t *testing.T) {
// given the app sync impersonation feature is enabled and the matching service account is prefixed with a namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://abc.svc.local",
Namespace: "testns",
DefaultServiceAccount: "myns:test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "*",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://abc.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:myns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the service account with the given namespace prefix must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
}
func TestSyncWithImpersonate(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
controller *ApplicationController
}
setup := func(impersonationEnabled bool, destinationNamespace, serviceAccountName string) *fixture {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
project := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://localhost:6443",
Namespace: destinationNamespace,
DefaultServiceAccount: serviceAccountName,
},
},
},
}
additionalObjs := []runtime.Object{}
if serviceAccountName != "" {
syncServiceAccount := &corev1.ServiceAccount{
ObjectMeta: v1.ObjectMeta{
Name: serviceAccountName,
Namespace: test.FakeDestNamespace,
},
}
additionalObjs = append(additionalObjs, syncServiceAccount)
}
data := fakeData{
apps: []runtime.Object{app, project},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: "https://localhost:6443",
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{},
configMapData: map[string]string{
"application.sync.impersonation.enabled": strconv.FormatBool(impersonationEnabled),
},
additionalObjs: additionalObjs,
}
ctrl := newFakeController(&data, nil)
return &fixture{
project: project,
application: app,
controller: ctrl,
}
}
t.Run("sync with impersonation and no matching service account", func(t *testing.T) {
// given the app sync impersonation feature is enabled and the application refers to a project with no matching service account
f := setup(true, test.FakeArgoCDNamespace, "")
opMessage := "failed to find a matching service account to impersonate: no matching service account found for destination server https://localhost:6443 and namespace fake-dest-ns"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then, app sync should fail with expected error message in operation state
assert.Equal(t, common.OperationError, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("sync with impersonation and empty service account match", func(t *testing.T) {
// given the app sync impersonation feature is enabled and the application refers to a project whose matching service account is an empty string
f := setup(true, test.FakeDestNamespace, "")
opMessage := "failed to find a matching service account to impersonate: default service account contains invalid chars ''"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should fail with expected error message in operation state
assert.Equal(t, common.OperationError, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("sync with impersonation and matching sa", func(t *testing.T) {
// given the app sync impersonation feature is enabled and the application refers to a project with a matching service account
f := setup(true, test.FakeDestNamespace, "test-sa")
opMessage := "successfully synced (no more tasks)"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should not fail
assert.Equal(t, common.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("sync without impersonation", func(t *testing.T) {
// given the app sync impersonation feature is disabled and the application refers to a project with a matching service account
f := setup(false, test.FakeDestNamespace, "")
opMessage := "successfully synced (no more tasks)"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then application sync should pass using the control plane service account
assert.Equal(t, common.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
}
func dig[T any](obj interface{}, path []interface{}) T {
i := obj

View File

@@ -6,7 +6,7 @@ metadata:
deployment.kubernetes.io/revision: '9'
iksm-version: '2.0'
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"quay.io/argoprojlabs/argocd-e2e-container:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}}
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}}
creationTimestamp: '2022-01-05T15:45:21Z'
generation: 119
managedFields:
@@ -137,7 +137,7 @@ spec:
- env:
- name: SOME_ENV_VAR
value: some_value
image: 'quay.io/argoprojlabs/argocd-e2e-container:0.1'
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
imagePullPolicy: IfNotPresent
name: guestbook-ui
ports:

View File

@@ -6,7 +6,7 @@ metadata:
deployment.kubernetes.io/revision: '9'
iksm-version: '2.0'
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"quay.io/argoprojlabs/argocd-e2e-container:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}}
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}}
creationTimestamp: '2022-01-05T15:45:21Z'
generation: 119
managedFields:
@@ -137,7 +137,7 @@ spec:
- env:
- name: SOME_ENV_VAR
value: some_value
image: 'quay.io/argoprojlabs/argocd-e2e-container:0.1'
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
imagePullPolicy: IfNotPresent
name: guestbook-ui
ports:

View File

@@ -25,7 +25,7 @@ spec:
value: yet_another_value
- name: SOME_ENV_VAR
value: different_value!
image: 'quay.io/argoprojlabs/argocd-e2e-container:0.1'
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
name: guestbook-ui
ports:
- containerPort: 80

View File

@@ -19,7 +19,7 @@ spec:
spec:
containers:
- name: guestbook-ui
image: 'quay.io/argoprojlabs/argocd-e2e-container:0.1'
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
env:
- name: SOME_ENV_VAR
value: some_value

View File

@@ -21,7 +21,7 @@ spec:
- env:
- name: SOME_ENV_VAR
value: some_value
image: 'quay.io/argoprojlabs/argocd-e2e-container:0.1'
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
name: guestbook-ui
ports:
- containerPort: 80

Binary file not shown (image changed: 163 KiB before, 117 KiB after).

View File

@@ -32,41 +32,23 @@ function initializeVersionDropdown() {
window[callbackName] = function(response) {
const div = document.createElement('div');
div.innerHTML = response.html;
const headerTitle = document.querySelector(".md-header__inner > .md-header__title");
if (headerTitle) {
headerTitle.appendChild(div);
}
document.querySelector(".md-header__inner > .md-header__title").appendChild(div);
const container = div.querySelector('.rst-versions');
if (!container) return; // Exit if container not found
// Add caret icon
var caret = document.createElement('div');
caret.innerHTML = "<i class='fa fa-caret-down dropdown-caret'></i>";
caret.classList.add('dropdown-caret');
const currentVersionElem = div.querySelector('.rst-current-version');
if (currentVersionElem) {
currentVersionElem.appendChild(caret);
}
div.querySelector('.rst-current-version').appendChild(caret);
// Add click listener to toggle dropdown
if (currentVersionElem && container) {
currentVersionElem.addEventListener('click', function() {
container.classList.toggle('shift-up');
});
}
// Sorting Logic
sortVersionLinks(container);
div.querySelector('.rst-current-version').addEventListener('click', function() {
container.classList.toggle('shift-up');
});
};
// Load CSS
var CSSLink = document.createElement('link');
CSSLink.rel = 'stylesheet';
CSSLink.href = '/assets/versions.css';
document.getElementsByTagName('head')[0].appendChild(CSSLink);
// Load JSONP Script
var script = document.createElement('script');
const currentVersion = getCurrentVersion();
script.src = 'https://argo-cd.readthedocs.io/_/api/v2/footer_html/?' +
@@ -74,58 +56,6 @@ function initializeVersionDropdown() {
document.getElementsByTagName('head')[0].appendChild(script);
}
// Function to sort version links
function sortVersionLinks(container) {
// Find all <dl> elements within the container
const dlElements = container.querySelectorAll('dl');
dlElements.forEach(dl => {
const dt = dl.querySelector('dt');
if (dt && dt.textContent.trim().toLowerCase() === 'versions') {
// Found the Versions <dl>
const ddElements = Array.from(dl.querySelectorAll('dd'));
// Define sorting criteria
ddElements.sort((a, b) => {
const aText = a.textContent.trim().toLowerCase();
const bText = b.textContent.trim().toLowerCase();
// Prioritize 'latest' and 'stable'
if (aText === 'latest') return -1;
if (bText === 'latest') return 1;
if (aText === 'stable') return -1;
if (bText === 'stable') return 1;
// Extract version numbers (e.g., release-2.9)
const aVersionMatch = aText.match(/release-(\d+(\.\d+)*)/);
const bVersionMatch = bText.match(/release-(\d+(\.\d+)*)/);
if (aVersionMatch && bVersionMatch) {
const aVersion = aVersionMatch[1].split('.').map(Number);
const bVersion = bVersionMatch[1].split('.').map(Number);
for (let i = 0; i < Math.max(aVersion.length, bVersion.length); i++) {
const aNum = aVersion[i] || 0;
const bNum = bVersion[i] || 0;
if (aNum > bNum) return -1;
if (aNum < bNum) return 1;
}
return 0;
}
// Fallback to alphabetical order
return aText.localeCompare(bText);
});
// Remove existing <dd> elements
ddElements.forEach(dd => dl.removeChild(dd));
// Append sorted <dd> elements
ddElements.forEach(dd => dl.appendChild(dd));
}
});
}
// VERSION WARNINGS
window.addEventListener("DOMContentLoaded", function() {
var margin = 30;

View File

@@ -60,38 +60,7 @@ data:
server: https://some-cluster
```
Proxy extensions can also be provided individually using dedicated
Argo CD configmap keys for better GitOps operations. The example below
demonstrates how to configure the same hypothetical httpbin config
above using a dedicated key:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
namespace: argocd
data:
extension.config.httpbin: |
connectionTimeout: 2s
keepAlive: 15s
idleConnectionTimeout: 60s
maxIdleConnections: 30
services:
- url: http://httpbin.org
headers:
- name: some-header
value: '$some.argocd.secret.key'
cluster:
name: some-cluster
server: https://some-cluster
```
Attention: Extension names must be unique in the Argo CD configmap. If
duplicated keys are found, the Argo CD API server will log an error
message and no proxy extension will be registered.
Note: There is no need to restart Argo CD Server after modifying the
Note: There is no need to restart Argo CD Server after modifiying the
`extension.config` entry in Argo CD configmap. Changes will be
automatically applied. A new proxy registry will be built making
all new incoming extensions requests (`<argocd-host>/extensions/*`) to
@@ -181,11 +150,12 @@ the argocd-secret with key 'some.argocd.secret.key'.
If provided, and multiple services are configured, will have to match
the application destination name or server to have requests properly
forwarded to this service URL. If there are multiple backends for the
same extension this field is required. In this case, it is necessary
to provide both values to avoid problems with applications unable to
send requests to the proper backend service. If only one backend
service is configured, this field is ignored, and all requests are
forwarded to the configured one.
same extension this field is required. In this case at least one of
the two will be required: name or server. It is better to provide both
values to avoid problems with applications unable to send requests to
the proper backend service. If only one backend service is
configured, this field is ignored, and all requests are forwarded to
the configured one.
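To make the matching rule concrete, here is a minimal sketch with two backends, assuming hypothetical cluster names and URLs; both `name` and `server` are provided so requests are routed to the backend matching the Application's destination:
```yaml
extension.config.httpbin: |
  services:
    - url: http://httpbin-prod.example.com   # hypothetical backend URL
      cluster:
        name: prod-cluster                   # matches the Application destination name
        server: https://prod-cluster:6443    # and/or the destination server
    - url: http://httpbin-dev.example.com
      cluster:
        name: dev-cluster
        server: https://dev-cluster:6443
```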
#### `extensions.backend.services.cluster.name` (*string*)
(optional)
@@ -298,10 +268,6 @@ section for more details.
Will be populated with the username logged in Argo CD.
#### `Argocd-User-Groups`
Will be populated with the 'groups' claim from the user logged in Argo CD.
### Multi Backend Use-Case
In some cases when Argo CD is configured to sync with multiple remote

View File

@@ -1,26 +1,10 @@
# Overview
!!! warning "You probably don't want to be reading this section of the docs."
This part of the manual is aimed at helping people contribute to Argo CD, the documentation, or to develop third-party applications that interact with Argo CD, e.g.
This part of the manual is aimed at people wanting to develop third-party applications that interact with Argo CD, e.g.
* A chat bot
* A Slack integration
## Contributing to Argo CD
* [Code Contribution Guide](code-contributions/)
* [Contributors Quickstart](contributors-quickstart/)
* [Running Argo CD Locally](running-locally/)
Need help? Start with the [Contributors FAQ](faq/)
## Contributing to the Documentation
* [Building and Running Documentation Site Locally](docs-site/)
## Extensions and Third-Party Applications
* [UI Extensions](ui-extensions/)
* [Proxy Extensions](proxy-extensions/)
* [Config Management Plugins](../operator-manual/config-management-plugins/)
## Contributing to Argo Website
The Argo website is maintained in the [argo-site](https://github.com/argoproj/argo-site) repository.
!!! note
Please make sure you've completed the [getting started guide](../getting_started.md).

View File

@@ -1,8 +1,8 @@
# Documentation Site
# Site
## Developing And Testing
The [documentation website](https://argo-cd.readthedocs.io/) is built using `mkdocs` and `mkdocs-material`.
The website is built using `mkdocs` and `mkdocs-material`.
To test:
@@ -10,7 +10,7 @@ To test:
make serve-docs
```
Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).
Making changes to documentation will automatically rebuild and refresh the view.
Make a change to documentation and the website will rebuild and refresh the view.
Before submitting a PR, build the website to verify that there are no errors building the site:
```bash

View File

@@ -1,131 +0,0 @@
# Application Sync using impersonation
!!! warning "Alpha Feature"
This is an experimental, alpha-quality feature that allows you to control the service account used for the sync operation. The configured service account can have fewer privileges, scoped to creating resources, compared to the highly privileged access required for the control plane operations.
!!! warning
Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.
## Introduction
Argo CD supports syncing `Application` resources using the same service account used for its control plane operations. This feature enables users to decouple the service account used for application sync from the service account used for control plane operations.
By default, application syncs in Argo CD have the same privileges as the Argo CD control plane. As a consequence, in a multi-tenant setup, the Argo CD control plane privileges need to match those of the tenant that needs the highest privileges. For example, if an Argo CD instance has 10 Applications and only one of them requires admin privileges, then the Argo CD control plane must have admin privileges to be able to sync that one Application. This gives malicious tenants an opportunity to gain admin-level access. Argo CD provides a multi-tenancy model to restrict what each `Application` is authorized to do using `AppProjects`; however, that alone is not secure enough, and if Argo CD is compromised, attackers can easily gain `cluster-admin` access to the cluster.
Some manual steps will need to be performed by the Argo CD administrator in order to enable this feature, as it is disabled by default.
!!! note
This feature is currently considered alpha. Some of the implementation details may change over time until it is promoted to a stable status. We will be happy if early adopters use this feature and provide us with bug reports and feedback.
### What is Impersonation
Impersonation is a Kubernetes feature, also exposed by the `kubectl` CLI client, that allows a user to act as another user through impersonation headers. For example, an admin could use this feature to debug an authorization policy by temporarily impersonating another user and seeing if a request was denied.
Impersonation requests first authenticate as the requesting user, then switch to the impersonated user info.
## Prerequisites
In a multi-team/multi-tenant environment, a team/tenant is typically granted access to a target namespace to self-manage their Kubernetes resources in a declarative way.
A typical tenant onboarding process looks like the following:
1. The platform admin creates a tenant namespace, and the service account to be used for creating the resources is created in the same tenant namespace.
2. The platform admin creates one or more Role(s) to manage Kubernetes resources in the tenant namespace.
3. The platform admin creates one or more RoleBinding(s) to map the service account to the role(s) created in the previous steps.
4. The platform admin can choose to either use the [apps-in-any-namespace](./app-any-namespace.md) feature or provide tenants access to create applications in the Argo CD control plane namespace.
5. If the platform admin chooses the apps-in-any-namespace feature, tenants can self-service their Argo applications in their respective tenant namespaces, and no additional access needs to be provided for the control plane namespace.
## Implementation details
### Overview
In order for an application to use a different service account for the application sync operation, the following steps need to be performed:
1. The impersonation feature flag should be enabled. Please refer to the steps provided in [Enable application sync with impersonation feature](#enable-application-sync-with-impersonation-feature).
2. The `AppProject` referenced by the `.spec.project` field of the `Application` must have the `DestinationServiceAccounts` mapping the destination server and namespace to a service account to be used for the sync operation. Please refer to the steps provided in [Configuring destination service accounts](#configuring-destination-service-accounts).
### Enable application sync with impersonation feature
In order to enable this feature, the Argo CD administrator must reconfigure the `application.sync.impersonation.enabled` settings in the `argocd-cm` ConfigMap as below:
```yaml
data:
application.sync.impersonation.enabled: "true"
```
### Disable application sync with impersonation feature
In order to disable this feature, the Argo CD administrator must reconfigure the `application.sync.impersonation.enabled` settings in the `argocd-cm` ConfigMap as below:
```yaml
data:
application.sync.impersonation.enabled: "false"
```
!!! note
This feature is disabled by default.
!!! note
This feature can be enabled/disabled only at the system level and, once enabled/disabled, it applies to all Applications managed by Argo CD.
## Configuring destination service accounts
Destination service accounts can be added to the `AppProject` under `.spec.destinationServiceAccounts`. Specify the target destination `server` and `namespace` and provide the service account to be used for the sync operation using the `defaultServiceAccount` field. Applications that refer to this `AppProject` will use the corresponding service account configured for their destination.
During the application sync operation, the controller loops through the available `destinationServiceAccounts` in the mapped `AppProject` and tries to find a matching candidate. If there are multiple matches for a destination server and namespace combination, then the first valid match is used. If there are no matches, then an error is reported during the sync operation. To avoid such sync errors, it is highly recommended to configure a valid service account as a catch-all for all target destinations and keep it at the lowest order of priority.
It is possible to specify service accounts along with their namespace, e.g. `tenant1-ns:guestbook-deployer`. If no namespace is provided for the service account, then the Application's `spec.destination.namespace` will be used. If no namespace is provided for the service account and the optional `spec.destination.namespace` field is also not provided in the `Application`, then the Application's namespace will be used.
`DestinationServiceAccounts` associated with an `AppProject` can be created and managed either declaratively or through the Argo CD API (e.g. using the CLI, the web UI, the REST API, etc.).
### Using declarative YAML
To declaratively configure destination service accounts, create a YAML file for the `AppProject` as below and apply the changes using the `kubectl apply` command.
```yaml
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: my-project
namespace: argocd
spec:
description: Example Project
# Allow manifests to deploy from any Git repos
sourceRepos:
- '*'
destinations:
- '*'
destinationServiceAccounts:
- server: https://kubernetes.default.svc
namespace: guestbook
defaultServiceAccount: guestbook-deployer
- server: https://kubernetes.default.svc
namespace: guestbook-dev
defaultServiceAccount: guestbook-dev-deployer
- server: https://kubernetes.default.svc
namespace: guestbook-stage
defaultServiceAccount: guestbook-stage-deployer
- server: https://kubernetes.default.svc # catch-all configuration
namespace: '*'
defaultServiceAccount: default
```
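As a sketch of the namespace-qualified form described above (`tenant1-ns` is a hypothetical tenant namespace), the service account can also be written as `<namespace>:<name>`:
```yaml
destinationServiceAccounts:
  - server: https://kubernetes.default.svc
    namespace: guestbook
    # service account looked up in tenant1-ns instead of the destination namespace
    defaultServiceAccount: tenant1-ns:guestbook-deployer
```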
### Using the CLI
Destination service accounts can be added to an `AppProject` using the Argo CD CLI.
For example, to add a destination service account for `in-cluster` and `guestbook` namespace, you can use the following CLI command:
```shell
argocd proj add-destination-service-account my-project https://kubernetes.default.svc guestbook guestbook-sa
```
Likewise, to remove the destination service account from an `AppProject`, you can use the following CLI command:
```shell
argocd proj remove-destination-service-account my-project https://kubernetes.default.svc guestbook
```
### Using the UI
Similar to the CLI, you can add a destination service account when creating or updating an `AppProject` from the UI.

View File

@@ -119,7 +119,7 @@ spec:
forceCommonLabels: false
forceCommonAnnotations: false
images:
- quay.io/argoprojlabs/argocd-e2e-container:0.2
- gcr.io/heptio-images/ks-guestbook-demo:0.2
- my-app=gcr.io/my-repo/my-app:0.1
namespace: custom-namespace
replicas:

View File

@@ -22,8 +22,8 @@ As an example, imagine that we have two clusters:
And our application YAMLs are defined in a Git repository:
- [Argo Workflows controller](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/git-generator-directory/cluster-addons/argo-workflows)
- [Prometheus operator](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/git-generator-directory/cluster-addons/prometheus-operator)
- Argo Workflows controller (examples/git-generator-directory/cluster-addons/argo-workflows)
- Prometheus operator (/examples/git-generator-directory/cluster-addons/prometheus-operator)
Our goal is to deploy both applications onto both clusters and, more generally, to automatically deploy new applications added to the Git repository, as well as to new clusters defined within Argo CD, in the future.
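For reference, a git directory generator wired to this layout might look like the following sketch (destination and template values are illustrative):
```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: cluster-addons
spec:
  generators:
    - git:
        repoURL: https://github.com/argoproj/argo-cd.git
        revision: HEAD
        directories:
          # one Application per matching directory
          - path: applicationset/examples/git-generator-directory/cluster-addons/*
  template:
    metadata:
      name: '{{path.basename}}'
    spec:
      project: default
      source:
        repoURL: https://github.com/argoproj/argo-cd.git
        targetRevision: HEAD
        path: '{{path}}'
      destination:
        server: https://kubernetes.default.svc
        namespace: '{{path.basename}}'
```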

View File

@@ -42,7 +42,6 @@ When the ApplicationSet changes, the changes will be applied to each group of Ap
* Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI.
* When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings.
* If an Application is considered "Pending" for `applicationsetcontroller.default.application.progressing.timeout` seconds, the Application is automatically moved to Healthy status (default 300).
* If an Application is not selected in any step, it will be excluded from the rolling sync and needs to be manually synced through the CLI or UI.
#### Example
The following example illustrates how to stage a progressive sync over Applications with explicitly configured environment labels.
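A sketch of the `strategy` stanza for such a staged rollout (the `envLabel` key and its values are illustrative):
```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  strategy:
    type: RollingSync
    rollingSync:
      steps:
        # step 1: sync all dev Applications first
        - matchExpressions:
            - key: envLabel
              operator: In
              values:
                - env-dev
        # step 2: then sync prod, at most 10% of Applications at a time
        - matchExpressions:
            - key: envLabel
              operator: In
              values:
                - env-prod
          maxUpdate: 10%
  # generators and template omitted for brevity
```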

View File

@@ -283,9 +283,6 @@ data:
# - annotation+label : Also uses an annotation for tracking, but additionally labels the resource with the application name
application.resourceTrackingMethod: annotation
# Optional installation id. Allows to have multiple installations of Argo CD in the same cluster.
installationID: "my-unique-id"
# disables admin user. Admin is enabled by default
admin.enabled: "false"
# add an additional local user with apiKey and login capabilities
@@ -332,14 +329,14 @@ data:
# spread out the refreshes and give time to the repo-server to catch up. The jitter is the maximum duration that can be
# added to the sync timeout. So, if the sync timeout is 3 minutes and the jitter is 1 minute, then the actual timeout will
# be between 3 and 4 minutes. Disabled when the value is 0, defaults to 0.
timeout.reconciliation.jitter: "0"
timeout.reconciliation.jitter: 0
# cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
cluster.inClusterEnabled: "true"
# The maximum number of pod logs to render in UI. If the application has more than this number of pods, the logs will not be rendered.
# This is to prevent the UI from becoming unresponsive when rendering a large number of logs. Default is 10.
server.maxPodLogsToRender: "10"
server.maxPodLogsToRender: 10
# Application pod logs RBAC enforcement enables control over who can and who can't view application pod logs.
# When you enable the switch, pod logs will be visible only to admin role by default. Other roles/users will not be able to view them via cli and UI.
@@ -428,7 +425,4 @@ data:
name: some-cluster
server: https://some-cluster
# The maximum size of the payload that can be sent to the webhook server.
webhook.maxPayloadSizeMB: "1024"
# application.sync.impersonation.enabled enables application sync to use a custom service account, via impersonation. This allows decoupling sync from control-plane service account.
application.sync.impersonation.enabled: "false"
webhook.maxPayloadSizeMB: 1024

View File

@@ -47,11 +47,8 @@ data:
controller.log.level: "info"
# Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
controller.metrics.cache.expiration: "24h0m0s"
# Specifies exponential backoff timeout parameters between application self heal attempts
controller.self.heal.timeout.seconds: "2"
controller.self.heal.backoff.factor: "3"
controller.self.heal.backoff.cap.seconds: "300"
# Specifies timeout between application self heal attempts (default 5)
controller.self.heal.timeout.seconds: "5"
# Cache expiration for app state (default 1h0m0s)
controller.app.state.cache.expiration: "1h0m0s"
# Specifies if resource health should be persisted in app CRD (default true)
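To make the backoff arithmetic concrete, here is a sketch of how these three settings likely compose, assuming the exponential backoff semantics described above (the delay before retry N is min(timeout * factor^(N-1), cap)):
```yaml
# Sketch, assuming the values above:
#   delay(N) = min(2 * 3^(N-1), 300) seconds
#   attempt 1: 2s, attempt 2: 6s, attempt 3: 18s,
#   attempt 4: 54s, attempt 5: 162s, attempt 6+: 300s (capped)
controller.self.heal.timeout.seconds: "2"
controller.self.heal.backoff.factor: "3"
controller.self.heal.backoff.cap.seconds: "300"
```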

View File

@@ -818,9 +818,9 @@ stringData:
}
}
```
This will instruct Argo CD to read the file at the provided path and use the credentials defined within to authenticate to AWS.
The profile must be mounted in both the `argocd-server` and `argocd-application-controller` components in order for this to work.
For example, the following values can be defined in a Helm-based Argo CD deployment:
This will instruct ArgoCD to read the file at the provided path and use the credentials defined within to authenticate to
AWS. The profile must be mounted in order for this to work. For example, the following values can be defined in a Helm
based ArgoCD deployment:
```yaml
controller:

View File

@@ -98,27 +98,20 @@ data:
return hs
```
In order to prevent duplication of custom health checks for potentially multiple resources, it is also possible to
specify a wildcard in the resource kind, and anywhere in the resource group, like this:
In order to prevent duplication of the custom health check for potentially multiple resources, it is also possible to specify a wildcard in the resource kind, and anywhere in the resource group, like this:
```yaml
resource.customizations: |
ec2.aws.crossplane.io/*:
health.lua: |
...
resource.customizations.health.ec2.aws.crossplane.io_*: |
...
```
```yaml
# If a key _begins_ with a wildcard, please ensure that the GVK key is quoted.
resource.customizations: |
"*.aws.crossplane.io/*":
health.lua: |
...
resource.customizations.health.*.aws.crossplane.io_*: |
...
```
!!!important
Please note that wildcards are only supported when using the `resource.customizations` key; the `resource.customizations.health.<group>_<kind>`
style keys do not work, since wildcards (`*`) are not supported in Kubernetes configmap keys.
Please note that there can be ambiguous resolution of wildcards; see [#16905](https://github.com/argoproj/argo-cd/issues/16905).
`obj` is a global variable that contains the resource. The script must return an object with a status field and an optional message field.
The custom health check might return one of the following health statuses:
@@ -128,7 +121,7 @@ The custom health check might return one of the following health statuses:
* `Degraded` - the resource is degraded
* `Suspended` - the resource is suspended and waiting for some external event to resume (e.g. suspended CronJob or paused Deployment)
By default, health typically returns a `Progressing` status.
By default health typically returns `Progressing` status.
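Tying this together, a minimal health script sketch (assuming, for illustration, a resource that reports a `Ready` condition in `status.conditions`):
```yaml
resource.customizations: |
  ec2.aws.crossplane.io/*:
    health.lua: |
      -- Sketch: default to Progressing until a Ready condition is seen
      local hs = {}
      hs.status = "Progressing"
      hs.message = "Waiting for Ready condition"
      if obj.status ~= nil and obj.status.conditions ~= nil then
        for _, condition in ipairs(obj.status.conditions) do
          if condition.type == "Ready" and condition.status == "True" then
            hs.status = "Healthy"
            hs.message = ""
          end
        end
      end
      return hs
```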
NOTE: As a security measure, access to the standard Lua libraries will be disabled by default. Admins can control access by
setting `resource.customizations.useOpenLibs.<group>_<kind>`. In the following example, standard libraries are enabled for health check of `cert-manager.io/Certificate`.

View File

@@ -21,9 +21,6 @@ Not recommended for production use. This type of installation is typically used
in (i.e. kubernetes.svc.default). It will still be able to deploy to external clusters with inputted
credentials.
> Note: The ClusterRoleBinding in the installation manifest is bound to a ServiceAccount in the argocd namespace.
> Be cautious when modifying the namespace, as changing it may cause permission-related errors unless the ClusterRoleBinding is correctly adjusted to reflect the new namespace.
* [namespace-install.yaml](https://github.com/argoproj/argo-cd/blob/master/manifests/namespace-install.yaml) - Installation of Argo CD which requires only
namespace level privileges (does not need cluster roles). Use this manifest set if you do not
need Argo CD to deploy applications in the same cluster that Argo CD runs in, and will rely solely
@@ -81,29 +78,6 @@ resources:
For an example of this, see the [kustomization.yaml](https://github.com/argoproj/argoproj-deployments/blob/master/argocd/kustomization.yaml)
used to deploy the [Argoproj CI/CD infrastructure](https://github.com/argoproj/argoproj-deployments#argoproj-deployments).
#### Installing Argo CD in a Custom Namespace
If you want to install Argo CD in a namespace other than the default argocd, you can use Kustomize to apply a patch that updates the ClusterRoleBinding to reference the correct namespace for the ServiceAccount. This ensures that the necessary permissions are correctly set in your custom namespace.
Below is an example of how to configure your kustomization.yaml to install Argo CD in a custom namespace:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: <your-custom-namespace>
resources:
- https://raw.githubusercontent.com/argoproj/argo-cd/v2.7.2/manifests/install.yaml
patches:
- patch: |-
- op: replace
path: /subjects/0/namespace
value: <your-custom-namespace>
target:
kind: ClusterRoleBinding
```
This patch ensures that the ClusterRoleBinding correctly maps to the ServiceAccount in your custom namespace, preventing any permission-related issues during the deployment.
## Helm
The Argo CD can be installed using [Helm](https://helm.sh/). The Helm chart is currently community maintained and available at

View File

@@ -8,7 +8,6 @@ Metrics about applications. Scraped at the `argocd-metrics:8082/metrics` endpoin
| Metric | Type | Description |
|--------|:----:|-------------|
| `argocd_app_info` | gauge | Information about Applications. It contains labels such as `sync_status` and `health_status` that reflect the application state in Argo CD. |
| `argocd_app_condition` | gauge | Report Applications conditions. It contains the conditions currently present in the application status. |
| `argocd_app_k8s_request_total` | counter | Number of Kubernetes requests executed during application reconciliation |
| `argocd_app_labels` | gauge | Argo Application labels converted to Prometheus labels. Disabled by default. See section below about how to enable it. |
| `argocd_app_reconcile` | histogram | Application reconciliation performance in seconds. |
@@ -31,8 +30,6 @@ to deleted resources, you can schedule a metrics reset to clean the
history with an application controller flag. Example:
`--metrics-cache-expiration="24h0m0s"`.
### Exposing Application labels as Prometheus metrics
There are use-cases where Argo CD Applications contain labels that are desired to be exposed as Prometheus metrics.
@@ -63,45 +60,6 @@ argocd_app_labels{label_business_unit="bu-id-1",label_team_name="my-team",name="
argocd_app_labels{label_business_unit="bu-id-2",label_team_name="another-team",name="my-app-3",namespace="argocd",project="important-project"} 1
```
### Exposing Application conditions as Prometheus metrics
There are use-cases where Argo CD Applications contain conditions that are desired to be exposed as Prometheus metrics.
Some examples are:
* Hunting orphaned resources across all deployed applications
* Knowing which resources are excluded from ArgoCD
As the Application conditions are specific to each company, this feature is disabled by default. To enable it, add the
`--metrics-application-conditions` flag to the Argo CD application controller.
The example below will expose the Argo CD Application condition `OrphanedResourceWarning` and `ExcludedResourceWarning` to Prometheus:
```yaml
containers:
- command:
- argocd-application-controller
- --metrics-application-conditions
- OrphanedResourceWarning
- --metrics-application-conditions
- ExcludedResourceWarning
```
## Application Set Controller metrics
The Application Set controller exposes the following metrics for application sets.
| Metric | Type | Description |
|--------|:----:|-------------|
| `argocd_appset_info` | gauge | Information about Application Sets. It contains labels for the name and namespace of an application set as well as `Resource_update_status` that reflects the `ResourcesUpToDate` property |
| `argocd_appset_reconcile` | histogram | Application reconciliation performance in seconds. It contains labels for the name and namespace of an applicationset |
| `argocd_appset_labels` | gauge | Applicationset labels translated to Prometheus labels. Disabled by default |
| `argocd_appset_owned_applications` | gauge | Number of applications owned by the applicationset. It contains labels for the name and namespace of an applicationset. |
Similar to the equivalent application controller metric (`argocd_app_labels`), the metric `argocd_appset_labels` is disabled by default. You can enable it by providing the `metrics-applicationset-labels` argument to the applicationset controller.
Once enabled, it works exactly like the application controller metrics (`label_` is prefixed to the normalized label name).
Available labels include the name and namespace, plus all labels enabled by the command-line options and their values (exactly like the application controller metrics described in the previous section).
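For illustration, a sketch of enabling it (the flag spelling follows the text above; the label name is hypothetical):
```yaml
containers:
  - command:
      - argocd-applicationset-controller
      - --metrics-applicationset-labels
      - team-name
```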
## API Server Metrics
Metrics about API Server API request and response activity (request totals, response codes, etc...).
Scraped at the `argocd-server-metrics:8083/metrics` endpoint.

View File

@@ -122,19 +122,9 @@ To do so, when the action if performed on an application's resource, the `<actio
For instance, to grant access to `example-user` to only delete Pods in the `prod-app` Application, the policy could be:
```csv
p, example-user, applications, delete/*/Pod/*/*, default/prod-app, allow
p, example-user, applications, delete/*/Pod/*, default/prod-app, allow
```
!!!warning "Understand glob pattern behavior"
Argo CD RBAC does not use `/` as a separator when evaluating glob patterns. So the pattern `delete/*/kind/*`
will match `delete/<group>/kind/<namespace>/<name>` but also `delete/<group>/<kind>/kind/<name>`.
The fact that both of these match will generally not be a problem, because resource kinds usually contain capital
letters while namespaces cannot. However, it is possible for a resource kind to be lowercase,
so it is best to always include all four parts of the resource in the pattern (in other words, always use four
slashes).
If we want to grant access to the user to update all resources of an application, but not the application itself:
```csv
@@ -145,7 +135,7 @@ If we want to explicitly deny delete of the application, but allow the user to d
```csv
p, example-user, applications, delete, default/prod-app, deny
p, example-user, applications, delete/*/Pod/*/*, default/prod-app, allow
p, example-user, applications, delete/*/Pod/*, default/prod-app, allow
```
!!! note
@@ -155,7 +145,7 @@ p, example-user, applications, delete/*/Pod/*/*, default/prod-app, allow
```csv
p, example-user, applications, delete, default/prod-app, allow
p, example-user, applications, delete/*/Pod/*/*, default/prod-app, deny
p, example-user, applications, delete/*/Pod/*, default/prod-app, deny
```
#### The `action` action

View File

@@ -80,20 +80,6 @@ The `discovery.lua` script must return a table where the key name represents the
Each action name must be represented in the list of `definitions` with an accompanying `action.lua` script to control the resource modifications. `obj` is a global variable that contains the resource. Each action script returns an optionally modified version of the resource. In this example, we are simply setting `.spec.suspend` to either `true` or `false`.
By default, defining a resource action customization will override any built-in action for this resource kind. If you want to retain the built-in actions, you can set the `mergeBuiltinActions` key to `true`. Your custom actions will have precedence over the built-in actions.
```yaml
resource.customizations.actions.argoproj.io_Rollout: |
mergeBuiltinActions: true
discovery.lua: |
actions = {}
actions["do-things"] = {}
return actions
definitions:
- name: do-things
action.lua: |
return obj
```
#### Creating new resources with a custom action
!!! important

View File

@@ -31,7 +31,6 @@ argocd-application-controller [flags]
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
--disable-compression If true, opt-out of response compression for all requests to the server
--dynamic-cluster-distribution-enabled Enables dynamic cluster distribution.
--enable-k8s-event none Enable ArgoCD to use k8s event. For disabling all events, set the value as none. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated) (default [all])
--gloglevel int Set the glog logging level
-h, --help help for argocd-application-controller
--ignore-normalizer-jq-execution-timeout-seconds duration Set ignore normalizer JQ execution timeout
@@ -40,7 +39,6 @@ argocd-application-controller [flags]
--kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20)
--logformat string Set the logging format. One of: text|json (default "text")
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
--metrics-application-conditions strings List of Application conditions that will be added to the argocd_application_conditions metric
--metrics-application-labels strings List of Application labels that will be added to the argocd_application_labels metric
--metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
--metrics-port int Start metrics server on given port (default 8082)
@@ -67,10 +65,7 @@ argocd-application-controller [flags]
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
--self-heal-backoff-cap-seconds int Specifies max timeout of exponential backoff between application self heal attempts (default 300)
--self-heal-backoff-factor int Specifies factor of exponential timeout between application self heal attempts (default 3)
--self-heal-backoff-timeout-seconds int Specifies initial timeout of exponential backoff between self heal attempts (default 2)
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
--sentinelmaster string Redis sentinel master group name. (default "master")
--server string The address and port of the Kubernetes API server

View File

@@ -51,7 +51,6 @@ argocd-server [flags]
--disable-auth Disable client authentication
--disable-compression If true, opt-out of response compression for all requests to the server
--enable-gzip Enable GZIP compression (default true)
--enable-k8s-event none Enable ArgoCD to use k8s event. For disabling all events, set the value as none. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated) (default [all])
--enable-proxy-extension Enable Proxy Extension feature
--gloglevel int Set the glog logging level
-h, --help help for argocd-server

View File

@@ -1,5 +1,2 @@
| Argo CD version | Kubernetes versions |
|-----------------|---------------------|
| 2.13 | v1.30, v1.29, v1.28, v1.27 |
| 2.12 | v1.29, v1.28, v1.27, v1.26 |
| 2.11 | v1.29, v1.28, v1.27, v1.26, v1.25 |
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
version.

Some files were not shown because too many files have changed in this diff