Compare commits

..

1 Commits

Author SHA1 Message Date
github-actions[bot]
d91a2ab3bf Bump version to 3.4.0-rc1 on release-3.4 branch (#26853)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2026-03-16 12:43:12 +02:00
83 changed files with 412 additions and 2892 deletions

View File

@@ -11,7 +11,6 @@ module.exports = {
"github>argoproj/argo-cd//renovate-presets/custom-managers/yaml.json5",
"github>argoproj/argo-cd//renovate-presets/fix/disable-all-updates.json5",
"github>argoproj/argo-cd//renovate-presets/devtool.json5",
"github>argoproj/argo-cd//renovate-presets/docs.json5",
"group:aws-sdk-go-v2Monorepo"
"github>argoproj/argo-cd//renovate-presets/docs.json5"
]
}

View File

@@ -66,7 +66,6 @@ jobs:
# Create new branch for cherry-pick
CHERRY_PICK_BRANCH="cherry-pick-${{ inputs.pr_number }}-to-${TARGET_BRANCH}"
git checkout -b "$CHERRY_PICK_BRANCH" "origin/$TARGET_BRANCH"
# Perform cherry-pick
@@ -76,17 +75,12 @@ jobs:
# Extract Signed-off-by from the cherry-pick commit
SIGNOFF=$(git log -1 --pretty=format:"%B" | grep -E '^Signed-off-by:' || echo "")
# Push the new branch. Force push to ensure that in case the original cherry-pick branch is stale,
# that the current state of the $TARGET_BRANCH + cherry-pick gets in $CHERRY_PICK_BRANCH.
git push origin -f "$CHERRY_PICK_BRANCH"
# Push the new branch
git push origin "$CHERRY_PICK_BRANCH"
# Save data for PR creation
echo "branch_name=$CHERRY_PICK_BRANCH" >> "$GITHUB_OUTPUT"
{
echo "signoff<<EOF"
echo "$SIGNOFF"
echo "EOF"
} >> "$GITHUB_OUTPUT"
echo "signoff=$SIGNOFF" >> "$GITHUB_OUTPUT"
echo "target_branch=$TARGET_BRANCH" >> "$GITHUB_OUTPUT"
else
echo "❌ Cherry-pick failed due to conflicts"

View File

@@ -80,16 +80,12 @@ jobs:
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build and module cache
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-build-v1-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-build-v1-
- name: Download Go modules
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Download all Go modules
run: |
go mod download
- name: Compile all packages
@@ -155,15 +151,11 @@ jobs:
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build and module cache
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-build-v1-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-build-v1-
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
@@ -175,7 +167,7 @@ jobs:
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download Go modules
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
@@ -223,15 +215,11 @@ jobs:
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build and module cache
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-build-v1-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-build-v1-
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
@@ -243,7 +231,7 @@ jobs:
run: |
git config --global user.name "John Doe"
git config --global user.email "john.doe@example.com"
- name: Download Go modules
- name: Download and vendor all required packages
run: |
go mod download
- name: Run all unit tests
@@ -487,15 +475,11 @@ jobs:
sudo chown $(whoami) $HOME/.kube/config
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build and module cache
- name: Restore go build cache
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-build-v1-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-build-v1-
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "$HOME/go/bin" >> $GITHUB_PATH
@@ -505,11 +489,9 @@ jobs:
- name: Add ./dist to PATH
run: |
echo "$(pwd)/dist" >> $GITHUB_PATH
- name: Download Go modules
- name: Download Go dependencies
run: |
go mod download
- name: Install goreman
run: |
go install github.com/mattn/goreman@latest
- name: Install all tools required for building & testing
run: |

View File

@@ -264,7 +264,7 @@ jobs:
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
- name: Upload SBOM
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2.6.1
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2.5.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

View File

@@ -2,7 +2,7 @@ controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v3/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex:v2.45.0" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: hack/start-redis-with-password.sh
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=\$(pwd)/dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=./dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'

View File

@@ -3,9 +3,9 @@ header:
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: d91a2ab3bf1b1143fb273fa06f54073fc78f41f1
commit-hash: 814db444c36503851dc3d45cf9c44394821ca1a4
project-url: https://github.com/argoproj/argo-cd
project-release: v3.5.0
project-release: v3.4.0
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:

View File

@@ -240,7 +240,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Mission Lane](https://missionlane.com)
1. [mixi Group](https://mixi.co.jp/)
1. [Moengage](https://www.moengage.com/)
1. [Mollie](https://www.mollie.com/)
1. [Money Forward](https://corp.moneyforward.com/en/)
1. [MongoDB](https://www.mongodb.com/)
1. [MOO Print](https://www.moo.com/)
@@ -381,7 +380,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [TBC Bank](https://tbcbank.ge/)
1. [Techcom Securities](https://www.tcbs.com.vn/)
1. [Techcombank](https://www.techcombank.com.vn/trang-chu)
1. [Technacy](https://www.technacy.it/)
1. [Telavita](https://www.telavita.com.br/)

View File

@@ -1 +1 @@
3.5.0
3.4.0-rc1

View File

@@ -24,13 +24,11 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -105,16 +103,15 @@ type ApplicationSetReconciler struct {
Policy argov1alpha1.ApplicationsSyncPolicy
EnablePolicyOverride bool
utils.Renderer
ArgoCDNamespace string
ApplicationSetNamespaces []string
EnableProgressiveSyncs bool
SCMRootCAPath string
GlobalPreservedAnnotations []string
GlobalPreservedLabels []string
Metrics *metrics.ApplicationsetMetrics
MaxResourcesStatusCount int
ClusterInformer *settings.ClusterInformer
ConcurrentApplicationUpdates int
ArgoCDNamespace string
ApplicationSetNamespaces []string
EnableProgressiveSyncs bool
SCMRootCAPath string
GlobalPreservedAnnotations []string
GlobalPreservedLabels []string
Metrics *metrics.ApplicationsetMetrics
MaxResourcesStatusCount int
ClusterInformer *settings.ClusterInformer
}
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
@@ -691,133 +688,108 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
// - For existing application, it will call update
// The function also adds owner reference to all applications, and uses it to delete them.
func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// Build the diff config once per reconcile.
// Diff config is per applicationset, so generate it once for all applications
diffConfig, err := utils.BuildIgnoreDiffConfig(applicationSet.Spec.IgnoreApplicationDifferences, normalizers.IgnoreNormalizerOpts{})
if err != nil {
return fmt.Errorf("failed to build ignore diff config: %w", err)
}
g, ctx := errgroup.WithContext(ctx)
concurrency := r.concurrency()
g.SetLimit(concurrency)
var appErrorsMu sync.Mutex
appErrors := map[string]error{}
var firstError error
// Creates or updates the application in appList
for _, generatedApp := range desiredApplications {
appLog := logCtx.WithFields(applog.GetAppLogFields(&generatedApp))
// Normalize to avoid fighting with the application controller.
generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec)
g.Go(func() error {
appLog := logCtx.WithFields(applog.GetAppLogFields(&generatedApp))
found := &argov1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: generatedApp.Name,
Namespace: generatedApp.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
found := &argov1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: generatedApp.Name,
Namespace: generatedApp.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
}
action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, normalizers.IgnoreNormalizerOpts{}, found, func() error {
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
found.Spec = generatedApp.Spec
// allow setting the Operation field to trigger a sync operation on an Application
if generatedApp.Operation != nil {
found.Operation = generatedApp.Operation
}
action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, diffConfig, found, func() error {
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
found.Spec = generatedApp.Spec
preservedAnnotations := make([]string, 0)
preservedLabels := make([]string, 0)
// allow setting the Operation field to trigger a sync operation on an Application
if generatedApp.Operation != nil {
found.Operation = generatedApp.Operation
}
preservedAnnotations := make([]string, 0)
preservedLabels := make([]string, 0)
if applicationSet.Spec.PreservedFields != nil {
preservedAnnotations = append(preservedAnnotations, applicationSet.Spec.PreservedFields.Annotations...)
preservedLabels = append(preservedLabels, applicationSet.Spec.PreservedFields.Labels...)
}
if len(r.GlobalPreservedAnnotations) > 0 {
preservedAnnotations = append(preservedAnnotations, r.GlobalPreservedAnnotations...)
}
if len(r.GlobalPreservedLabels) > 0 {
preservedLabels = append(preservedLabels, r.GlobalPreservedLabels...)
}
// Preserve specially treated argo cd annotations:
// * https://github.com/argoproj/applicationset/issues/180
// * https://github.com/argoproj/argo-cd/issues/10500
preservedAnnotations = append(preservedAnnotations, defaultPreservedAnnotations...)
for _, key := range preservedAnnotations {
if state, exists := found.Annotations[key]; exists {
if generatedApp.Annotations == nil {
generatedApp.Annotations = map[string]string{}
}
generatedApp.Annotations[key] = state
}
}
for _, key := range preservedLabels {
if state, exists := found.Labels[key]; exists {
if generatedApp.Labels == nil {
generatedApp.Labels = map[string]string{}
}
generatedApp.Labels[key] = state
}
}
// Preserve deleting finalizers and avoid diff conflicts
for _, finalizer := range defaultPreservedFinalizers {
for _, f := range found.Finalizers {
// For finalizers, use prefix matching in case it contains "/" stages
if strings.HasPrefix(f, finalizer) {
generatedApp.Finalizers = append(generatedApp.Finalizers, f)
}
}
}
found.Annotations = generatedApp.Annotations
found.Labels = generatedApp.Labels
found.Finalizers = generatedApp.Finalizers
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
if err != nil {
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
// If the context was canceled or its deadline exceeded, return the error so it propagates through g.Wait().
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
}
// For backwards compatibility with sequential behavior: continue processing other applications
// but record the error keyed by app name so we can deterministically return the error from
// the lexicographically first failing app, regardless of goroutine scheduling order.
appErrorsMu.Lock()
appErrors[generatedApp.Name] = err
appErrorsMu.Unlock()
return nil
if applicationSet.Spec.PreservedFields != nil {
preservedAnnotations = append(preservedAnnotations, applicationSet.Spec.PreservedFields.Annotations...)
preservedLabels = append(preservedLabels, applicationSet.Spec.PreservedFields.Labels...)
}
if action != controllerutil.OperationResultNone {
// Don't pollute etcd with "unchanged Application" events
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
appLog.Logf(log.InfoLevel, "%s Application", action)
} else {
// "unchanged Application" can be inferred by Reconcile Complete with no action being listed
// Or enable debug logging
appLog.Logf(log.DebugLevel, "%s Application", action)
if len(r.GlobalPreservedAnnotations) > 0 {
preservedAnnotations = append(preservedAnnotations, r.GlobalPreservedAnnotations...)
}
return nil
if len(r.GlobalPreservedLabels) > 0 {
preservedLabels = append(preservedLabels, r.GlobalPreservedLabels...)
}
// Preserve specially treated argo cd annotations:
// * https://github.com/argoproj/applicationset/issues/180
// * https://github.com/argoproj/argo-cd/issues/10500
preservedAnnotations = append(preservedAnnotations, defaultPreservedAnnotations...)
for _, key := range preservedAnnotations {
if state, exists := found.Annotations[key]; exists {
if generatedApp.Annotations == nil {
generatedApp.Annotations = map[string]string{}
}
generatedApp.Annotations[key] = state
}
}
for _, key := range preservedLabels {
if state, exists := found.Labels[key]; exists {
if generatedApp.Labels == nil {
generatedApp.Labels = map[string]string{}
}
generatedApp.Labels[key] = state
}
}
// Preserve deleting finalizers and avoid diff conflicts
for _, finalizer := range defaultPreservedFinalizers {
for _, f := range found.Finalizers {
// For finalizers, use prefix matching in case it contains "/" stages
if strings.HasPrefix(f, finalizer) {
generatedApp.Finalizers = append(generatedApp.Finalizers, f)
}
}
}
found.Annotations = generatedApp.Annotations
found.Labels = generatedApp.Labels
found.Finalizers = generatedApp.Finalizers
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
}
if err != nil {
appLog.WithError(err).WithField("action", action).Errorf("failed to %s Application", action)
if firstError == nil {
firstError = err
}
continue
}
if err := g.Wait(); errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
if action != controllerutil.OperationResultNone {
// Don't pollute etcd with "unchanged Application" events
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
appLog.Logf(log.InfoLevel, "%s Application", action)
} else {
// "unchanged Application" can be inferred by Reconcile Complete with no action being listed
// Or enable debug logging
appLog.Logf(log.DebugLevel, "%s Application", action)
}
}
return firstAppError(appErrors)
return firstError
}
// createInCluster will filter from the desiredApplications only the application that needs to be created
@@ -877,84 +849,36 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
m[app.Name] = true
}
g, ctx := errgroup.WithContext(ctx)
concurrency := r.concurrency()
g.SetLimit(concurrency)
var appErrorsMu sync.Mutex
appErrors := map[string]error{}
// Delete apps that are not in m[string]bool
var firstError error
for _, app := range current {
logCtx = logCtx.WithFields(applog.GetAppLogFields(&app))
_, exists := m[app.Name]
if exists {
continue
}
appLogCtx := logCtx.WithFields(applog.GetAppLogFields(&app))
g.Go(func() error {
if !exists {
// Removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, appLogCtx)
err := r.removeFinalizerOnInvalidDestination(ctx, applicationSet, &app, clusterList, logCtx)
if err != nil {
appLogCtx.WithError(err).Error("failed to update Application")
// If the context was canceled or its deadline exceeded, return the error so it propagates through g.Wait().
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
logCtx.WithError(err).Error("failed to update Application")
if firstError != nil {
firstError = err
}
// For backwards compatibility with sequential behavior: continue processing other applications
// but record the error keyed by app name so we can deterministically return the error from
// the lexicographically first failing app, regardless of goroutine scheduling order.
appErrorsMu.Lock()
appErrors[app.Name] = err
appErrorsMu.Unlock()
return nil
continue
}
err = r.Delete(ctx, &app)
if err != nil {
appLogCtx.WithError(err).Error("failed to delete Application")
// If the context was canceled or its deadline exceeded, return the error so it propagates through g.Wait().
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
logCtx.WithError(err).Error("failed to delete Application")
if firstError != nil {
firstError = err
}
appErrorsMu.Lock()
appErrors[app.Name] = err
appErrorsMu.Unlock()
return nil
continue
}
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Deleted", "Deleted Application %q", app.Name)
appLogCtx.Log(log.InfoLevel, "Deleted application")
return nil
})
logCtx.Log(log.InfoLevel, "Deleted application")
}
}
if err := g.Wait(); errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
}
return firstAppError(appErrors)
}
// concurrency returns the configured number of concurrent application updates, defaulting to 1.
func (r *ApplicationSetReconciler) concurrency() int {
if r.ConcurrentApplicationUpdates <= 0 {
return 1
}
return r.ConcurrentApplicationUpdates
}
// firstAppError returns the error associated with the lexicographically smallest application name
// in the provided map. This gives a deterministic result when multiple goroutines may have
// recorded errors concurrently, matching the behavior of the original sequential loop where the
// first application in iteration order would determine the returned error.
func firstAppError(appErrors map[string]error) error {
if len(appErrors) == 0 {
return nil
}
names := make([]string, 0, len(appErrors))
for name := range appErrors {
names = append(names, name)
}
sort.Strings(names)
return appErrors[names[0]]
return firstError
}
// removeFinalizerOnInvalidDestination removes the Argo CD resources finalizer if the application contains an invalid target (eg missing cluster)

View File

@@ -25,7 +25,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
@@ -1078,70 +1077,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
},
},
},
{
name: "Ensure that unnormalized live spec does not cause a spurious patch",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
Spec: v1alpha1.ApplicationSpec{
Project: "project",
},
},
},
},
existingApps: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "2",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
// Without normalizing the live object, the equality check
// sees &SyncPolicy{} vs nil and issues an unnecessary patch.
SyncPolicy: &v1alpha1.SyncPolicy{},
},
},
},
desiredApps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
SyncPolicy: nil,
},
},
},
expected: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "2",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
SyncPolicy: &v1alpha1.SyncPolicy{},
},
},
},
},
{
name: "Ensure that argocd pre-delete and post-delete finalizers are preserved from an existing app",
appSet: v1alpha1.ApplicationSet{
@@ -1251,374 +1186,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
}
}
func TestCreateOrUpdateInCluster_Concurrent(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
appSet := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
}
t.Run("all apps are created correctly with concurrency > 1", func(t *testing.T) {
desiredApps := make([]v1alpha1.Application, 5)
for i := range desiredApps {
desiredApps[i] = v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("app%d", i),
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSpec{Project: "project"},
}
}
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&appSet).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
r := ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
Metrics: metrics,
ConcurrentApplicationUpdates: 5,
}
err = r.createOrUpdateInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, desiredApps)
require.NoError(t, err)
for _, desired := range desiredApps {
got := &v1alpha1.Application{}
require.NoError(t, fakeClient.Get(t.Context(), crtclient.ObjectKey{Namespace: desired.Namespace, Name: desired.Name}, got))
assert.Equal(t, desired.Spec.Project, got.Spec.Project)
}
})
t.Run("non-context errors from concurrent goroutines are collected and one is returned", func(t *testing.T) {
existingApps := make([]v1alpha1.Application, 5)
initObjs := []crtclient.Object{&appSet}
for i := range existingApps {
existingApps[i] = v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("app%d", i),
Namespace: "namespace",
ResourceVersion: "1",
},
Spec: v1alpha1.ApplicationSpec{Project: "old"},
}
app := existingApps[i].DeepCopy()
require.NoError(t, controllerutil.SetControllerReference(&appSet, app, scheme))
initObjs = append(initObjs, app)
}
desiredApps := make([]v1alpha1.Application, 5)
for i := range desiredApps {
desiredApps[i] = v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("app%d", i),
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSpec{Project: "new"},
}
}
patchErr := errors.New("some patch error")
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(initObjs...).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Patch: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ crtclient.Patch, _ ...crtclient.PatchOption) error {
return patchErr
},
}).
Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
r := ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
Metrics: metrics,
ConcurrentApplicationUpdates: 5,
}
err = r.createOrUpdateInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, desiredApps)
require.ErrorIs(t, err, patchErr)
})
}
func TestCreateOrUpdateInCluster_ContextCancellation(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
appSet := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
}
existingApp := v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "1",
},
Spec: v1alpha1.ApplicationSpec{Project: "old"},
}
desiredApp := v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSpec{Project: "new"},
}
t.Run("context canceled on patch is returned directly", func(t *testing.T) {
initObjs := []crtclient.Object{&appSet}
app := existingApp.DeepCopy()
err = controllerutil.SetControllerReference(&appSet, app, scheme)
require.NoError(t, err)
initObjs = append(initObjs, app)
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(initObjs...).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Patch: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ crtclient.Patch, _ ...crtclient.PatchOption) error {
return context.Canceled
},
}).
Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
r := ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
Metrics: metrics,
}
err = r.createOrUpdateInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{desiredApp})
require.ErrorIs(t, err, context.Canceled)
})
t.Run("context deadline exceeded on patch is returned directly", func(t *testing.T) {
initObjs := []crtclient.Object{&appSet}
app := existingApp.DeepCopy()
err = controllerutil.SetControllerReference(&appSet, app, scheme)
require.NoError(t, err)
initObjs = append(initObjs, app)
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(initObjs...).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Patch: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ crtclient.Patch, _ ...crtclient.PatchOption) error {
return context.DeadlineExceeded
},
}).
Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
r := ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
Metrics: metrics,
}
err = r.createOrUpdateInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{desiredApp})
require.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("non-context error is collected and returned after all goroutines finish", func(t *testing.T) {
initObjs := []crtclient.Object{&appSet}
app := existingApp.DeepCopy()
err = controllerutil.SetControllerReference(&appSet, app, scheme)
require.NoError(t, err)
initObjs = append(initObjs, app)
patchErr := errors.New("some patch error")
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(initObjs...).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Patch: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ crtclient.Patch, _ ...crtclient.PatchOption) error {
return patchErr
},
}).
Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
r := ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
Metrics: metrics,
}
err = r.createOrUpdateInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{desiredApp})
require.ErrorIs(t, err, patchErr)
})
t.Run("context canceled on create is returned directly", func(t *testing.T) {
initObjs := []crtclient.Object{&appSet}
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(initObjs...).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Create: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ ...crtclient.CreateOption) error {
return context.Canceled
},
}).
Build()
metrics := appsetmetrics.NewFakeAppsetMetrics()
r := ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
Metrics: metrics,
}
newApp := v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{Name: "newapp", Namespace: "namespace"},
Spec: v1alpha1.ApplicationSpec{Project: "default"},
}
err = r.createOrUpdateInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{newApp})
require.ErrorIs(t, err, context.Canceled)
})
}
// TestDeleteInCluster_ContextCancellation verifies the error-propagation contract of
// deleteInCluster: context.Canceled and context.DeadlineExceeded returned by the
// client's Delete call are surfaced directly, while any other delete error is still
// returned to the caller (errors.Is must match it).
func TestDeleteInCluster_ContextCancellation(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = corev1.AddToScheme(scheme)
require.NoError(t, err)
// Parent ApplicationSet that owns the pre-existing Application below.
appSet := v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
}
// An Application that exists in the cluster but is not in the desired set passed
// to deleteInCluster, so the reconciler will attempt to delete it.
existingApp := v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "delete-me",
Namespace: "namespace",
ResourceVersion: "1",
},
Spec: v1alpha1.ApplicationSpec{Project: "project"},
}
// makeReconciler builds a reconciler wired to the given fake client plus a synced
// cluster informer; informer shutdown is registered via t.Cleanup.
makeReconciler := func(t *testing.T, fakeClient crtclient.Client) ApplicationSetReconciler {
t.Helper()
kubeclientset := kubefake.NewClientset()
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "namespace")
require.NoError(t, err)
cancel := startAndSyncInformer(t, clusterInformer)
t.Cleanup(cancel)
return ApplicationSetReconciler{
Client: fakeClient,
Scheme: scheme,
Recorder: record.NewFakeRecorder(10),
KubeClientset: kubeclientset,
Metrics: appsetmetrics.NewFakeAppsetMetrics(),
ClusterInformer: clusterInformer,
}
}
t.Run("context canceled on delete is returned directly", func(t *testing.T) {
app := existingApp.DeepCopy()
err = controllerutil.SetControllerReference(&appSet, app, scheme)
require.NoError(t, err)
// Interceptor forces every Delete call to fail with context.Canceled.
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&appSet, app).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Delete: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ ...crtclient.DeleteOption) error {
return context.Canceled
},
}).
Build()
r := makeReconciler(t, fakeClient)
err = r.deleteInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{})
require.ErrorIs(t, err, context.Canceled)
})
t.Run("context deadline exceeded on delete is returned directly", func(t *testing.T) {
app := existingApp.DeepCopy()
err = controllerutil.SetControllerReference(&appSet, app, scheme)
require.NoError(t, err)
// Interceptor forces every Delete call to fail with context.DeadlineExceeded.
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&appSet, app).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Delete: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ ...crtclient.DeleteOption) error {
return context.DeadlineExceeded
},
}).
Build()
r := makeReconciler(t, fakeClient)
err = r.deleteInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{})
require.ErrorIs(t, err, context.DeadlineExceeded)
})
t.Run("non-context delete error is collected and returned", func(t *testing.T) {
app := existingApp.DeepCopy()
err = controllerutil.SetControllerReference(&appSet, app, scheme)
require.NoError(t, err)
deleteErr := errors.New("delete failed")
// Interceptor forces every Delete call to fail with an ordinary (non-context) error.
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(&appSet, app).
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
WithInterceptorFuncs(interceptor.Funcs{
Delete: func(_ context.Context, _ crtclient.WithWatch, _ crtclient.Object, _ ...crtclient.DeleteOption) error {
return deleteErr
},
}).
Build()
r := makeReconciler(t, fakeClient)
err = r.deleteInCluster(t.Context(), log.NewEntry(log.StandardLogger()), appSet, []v1alpha1.Application{})
require.ErrorIs(t, err, deleteErr)
})
}
func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
@@ -7754,40 +7321,6 @@ func TestIsRollingSyncStrategy(t *testing.T) {
}
}
// TestFirstAppError verifies that firstAppError deterministically returns the error
// belonging to the lexicographically smallest application name, returns nil for an
// empty map, and is stable across repeated calls on the same input.
func TestFirstAppError(t *testing.T) {
errA := errors.New("error from app-a")
errB := errors.New("error from app-b")
errC := errors.New("error from app-c")
t.Run("returns nil for empty map", func(t *testing.T) {
assert.NoError(t, firstAppError(map[string]error{}))
})
t.Run("returns the single error", func(t *testing.T) {
assert.ErrorIs(t, firstAppError(map[string]error{"app-a": errA}), errA)
})
t.Run("returns error from lexicographically first app name", func(t *testing.T) {
appErrors := map[string]error{
"app-c": errC,
"app-a": errA,
"app-b": errB,
}
assert.ErrorIs(t, firstAppError(appErrors), errA)
})
t.Run("result is stable across multiple calls with same input", func(t *testing.T) {
// Go map iteration order is randomized, so calling repeatedly guards against an
// implementation that depends on iteration order rather than sorted keys.
appErrors := map[string]error{
"app-c": errC,
"app-a": errA,
"app-b": errB,
}
for range 10 {
assert.ErrorIs(t, firstAppError(appErrors), errA, "firstAppError must return the same error on every call")
}
})
}
func TestSyncApplication(t *testing.T) {
tests := []struct {
name string

View File

@@ -24,43 +24,6 @@ import (
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
)
// appEquality defines the semantic equality used when comparing a live Application
// against its desired state, tolerating representation-only differences (quantity
// formatting, time zones, selector serialization) so they do not look like drift.
var appEquality = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b metav1.MicroTime) bool {
// Compare instants in UTC so differing time zone representations compare equal.
return a.UTC().Equal(b.UTC())
},
func(a, b metav1.Time) bool {
return a.UTC().Equal(b.UTC())
},
func(a, b labels.Selector) bool {
// Selectors are considered equal when they serialize to the same string.
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
func(a, b argov1alpha1.ApplicationDestination) bool {
// Compare only the identifying fields (Namespace, Name, Server); any other
// fields on the destination are intentionally ignored.
return a.Namespace == b.Namespace && a.Name == b.Name && a.Server == b.Server
},
)
// BuildIgnoreDiffConfig constructs a DiffConfig from the ApplicationSet's ignoreDifferences rules.
// Returns nil when ignoreDifferences is empty.
func BuildIgnoreDiffConfig(ignoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) (argodiff.DiffConfig, error) {
// A nil config signals "no ignore rules"; consumers treat it as a no-op.
if len(ignoreDifferences) == 0 {
return nil, nil
}
// NOTE(review): caching is disabled here — presumably because the config is
// rebuilt once per reconcile cycle anyway; confirm if caching becomes relevant.
return argodiff.NewDiffConfigBuilder().
WithDiffSettings(ignoreDifferences.ToApplicationIgnoreDifferences(), nil, false, ignoreNormalizerOpts).
WithNoCache().
Build()
}
// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
// in sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go
// to add equality for argov1alpha1.ApplicationDestination
@@ -71,15 +34,10 @@ func BuildIgnoreDiffConfig(ignoreDifferences argov1alpha1.ApplicationSetIgnoreDi
// cluster. The object's desired state must be reconciled with the existing
// state inside the passed in callback MutateFn.
//
// diffConfig must be built once per reconcile cycle via BuildIgnoreDiffConfig and may be nil
// when there are no ignoreDifferences rules. obj.Spec must already be normalized by the caller
// via NormalizeApplicationSpec before this function is called; the live object fetched from the
// cluster is normalized internally.
//
// The MutateFn is called regardless of creating or updating an object.
//
// It returns the executed operation and an error.
func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, diffConfig argodiff.DiffConfig, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
key := client.ObjectKeyFromObject(obj)
if err := c.Get(ctx, key, obj); err != nil {
if !errors.IsNotFound(err) {
@@ -101,18 +59,43 @@ func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, dif
return controllerutil.OperationResultNone, err
}
// Normalize the live spec to avoid spurious diffs from unimportant differences (e.g. nil vs
// empty SyncPolicy). obj.Spec is already normalized by the caller; only the live side needs it.
normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
// Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
// prevents those differences from appearing in the diff and therefore in the patch.
err := applyIgnoreDifferences(diffConfig, normalizedLive, obj)
err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj, ignoreNormalizerOpts)
if err != nil {
return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
}
if appEquality.DeepEqual(normalizedLive, obj) {
// Normalize to avoid diffing on unimportant differences.
normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
obj.Spec = *argo.NormalizeApplicationSpec(&obj.Spec)
equality := conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b metav1.MicroTime) bool {
return a.UTC().Equal(b.UTC())
},
func(a, b metav1.Time) bool {
return a.UTC().Equal(b.UTC())
},
func(a, b labels.Selector) bool {
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
func(a, b argov1alpha1.ApplicationDestination) bool {
return a.Namespace == b.Namespace && a.Name == b.Name && a.Server == b.Server
},
)
if equality.DeepEqual(normalizedLive, obj) {
return controllerutil.OperationResultNone, nil
}
@@ -152,13 +135,19 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
}
// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
// diffConfig may be nil, in which case this is a no-op.
func applyIgnoreDifferences(diffConfig argodiff.DiffConfig, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application) error {
if diffConfig == nil {
func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) error {
if len(applicationSetIgnoreDifferences) == 0 {
return nil
}
generatedAppCopy := generatedApp.DeepCopy()
diffConfig, err := argodiff.NewDiffConfigBuilder().
WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false, ignoreNormalizerOpts).
WithNoCache().
Build()
if err != nil {
return fmt.Errorf("failed to build diff config: %w", err)
}
unstructuredFound, err := appToUnstructured(found)
if err != nil {
return fmt.Errorf("failed to convert found application to unstructured: %w", err)

View File

@@ -224,9 +224,7 @@ spec:
generatedApp := v1alpha1.Application{TypeMeta: appMeta}
err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
require.NoError(t, err, tc.generatedApp)
diffConfig, err := BuildIgnoreDiffConfig(tc.ignoreDifferences, normalizers.IgnoreNormalizerOpts{})
require.NoError(t, err)
err = applyIgnoreDifferences(diffConfig, &foundApp, &generatedApp)
err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp, normalizers.IgnoreNormalizerOpts{})
require.NoError(t, err)
yamlFound, err := yaml.Marshal(tc.foundApp)
require.NoError(t, err)

View File

@@ -79,7 +79,6 @@ func NewCommand() *cobra.Command {
tokenRefStrictMode bool
maxResourcesStatusCount int
cacheSyncPeriod time.Duration
concurrentApplicationUpdates int
)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
@@ -240,25 +239,24 @@ func NewCommand() *cobra.Command {
})
if err = (&controllers.ApplicationSetReconciler{
Generators: topLevelGenerators,
Client: utils.NewCacheSyncingClient(mgr.GetClient(), mgr.GetCache()),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
Renderer: &utils.Render{},
Policy: policyObj,
EnablePolicyOverride: enablePolicyOverride,
KubeClientset: k8sClient,
ArgoDB: argoCDDB,
ArgoCDNamespace: namespace,
ApplicationSetNamespaces: applicationSetNamespaces,
EnableProgressiveSyncs: enableProgressiveSyncs,
SCMRootCAPath: scmRootCAPath,
GlobalPreservedAnnotations: globalPreservedAnnotations,
GlobalPreservedLabels: globalPreservedLabels,
Metrics: &metrics,
MaxResourcesStatusCount: maxResourcesStatusCount,
ClusterInformer: clusterInformer,
ConcurrentApplicationUpdates: concurrentApplicationUpdates,
Generators: topLevelGenerators,
Client: utils.NewCacheSyncingClient(mgr.GetClient(), mgr.GetCache()),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
Renderer: &utils.Render{},
Policy: policyObj,
EnablePolicyOverride: enablePolicyOverride,
KubeClientset: k8sClient,
ArgoDB: argoCDDB,
ArgoCDNamespace: namespace,
ApplicationSetNamespaces: applicationSetNamespaces,
EnableProgressiveSyncs: enableProgressiveSyncs,
SCMRootCAPath: scmRootCAPath,
GlobalPreservedAnnotations: globalPreservedAnnotations,
GlobalPreservedLabels: globalPreservedLabels,
Metrics: &metrics,
MaxResourcesStatusCount: maxResourcesStatusCount,
ClusterInformer: clusterInformer,
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
os.Exit(1)
@@ -305,7 +303,6 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&enableGitHubAPIMetrics, "enable-github-api-metrics", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_GITHUB_API_METRICS", false), "Enable GitHub API metrics for generators that use the GitHub API")
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 5000, 0, math.MaxInt), "Max number of resources stored in appset status.")
command.Flags().DurationVar(&cacheSyncPeriod, "cache-sync-period", env.ParseDurationFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CACHE_SYNC_PERIOD", time.Hour*10, 0, time.Hour*24), "Period at which the manager client cache is forcefully resynced with the Kubernetes API server. 0 disables periodic resync.")
command.Flags().IntVar(&concurrentApplicationUpdates, "concurrent-application-updates", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_APPLICATION_UPDATES", 1, 1, 200), "Number of concurrent Application create/update/delete operations per ApplicationSet reconcile.")
return &command
}

View File

@@ -1,28 +0,0 @@
package command
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewCommand_ConcurrentApplicationUpdatesFlag checks that NewCommand registers the
// --concurrent-application-updates flag as an int with a default of 1.
func TestNewCommand_ConcurrentApplicationUpdatesFlag(t *testing.T) {
cmd := NewCommand()
flag := cmd.Flags().Lookup("concurrent-application-updates")
require.NotNil(t, flag, "expected --concurrent-application-updates flag to be registered")
assert.Equal(t, "int", flag.Value.Type())
assert.Equal(t, "1", flag.DefValue, "default should be 1")
}
// TestNewCommand_ConcurrentApplicationUpdatesFlagValue checks that an explicitly set
// --concurrent-application-updates value is parsed and retrievable as an int.
func TestNewCommand_ConcurrentApplicationUpdatesFlagValue(t *testing.T) {
cmd := NewCommand()
err := cmd.Flags().Set("concurrent-application-updates", "5")
require.NoError(t, err)
val, err := cmd.Flags().GetInt("concurrent-application-updates")
require.NoError(t, err)
assert.Equal(t, 5, val)
}

View File

@@ -34,7 +34,6 @@ import (
"github.com/argoproj/argo-cd/v3/util/dex"
"github.com/argoproj/argo-cd/v3/util/env"
"github.com/argoproj/argo-cd/v3/util/errors"
utilglob "github.com/argoproj/argo-cd/v3/util/glob"
"github.com/argoproj/argo-cd/v3/util/kube"
"github.com/argoproj/argo-cd/v3/util/templates"
"github.com/argoproj/argo-cd/v3/util/tls"
@@ -88,7 +87,6 @@ func NewCommand() *cobra.Command {
applicationNamespaces []string
enableProxyExtension bool
webhookParallelism int
globCacheSize int
hydratorEnabled bool
syncWithReplaceAllowed bool
@@ -124,7 +122,6 @@ func NewCommand() *cobra.Command {
cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)
cli.SetGLogLevel(glogLevel)
utilglob.SetCacheSize(globCacheSize)
// Recover from panic and log the error using the configured logger instead of the default.
defer func() {
@@ -329,7 +326,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_SERVER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().IntVar(&globCacheSize, "glob-cache-size", env.ParseNumFromEnv("ARGOCD_SERVER_GLOB_CACHE_SIZE", utilglob.DefaultGlobCacheSize, 1, math.MaxInt32), "Maximum number of compiled glob patterns to cache for RBAC evaluation")
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
command.Flags().BoolVar(&hydratorEnabled, "hydrator-enabled", env.ParseBoolFromEnv("ARGOCD_HYDRATOR_ENABLED", false), "Feature flag to enable Hydrator. Default (\"false\")")
command.Flags().BoolVar(&syncWithReplaceAllowed, "sync-with-replace-allowed", env.ParseBoolFromEnv("ARGOCD_SYNC_WITH_REPLACE_ALLOWED", true), "Whether to allow users to select replace for syncs from UI/CLI")

View File

@@ -308,9 +308,22 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alp
sync.WithLogr(logutils.NewLogrusLogger(logEntry)),
sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)),
sync.WithPermissionValidator(func(un *unstructured.Unstructured, res *metav1.APIResource) error {
return validateSyncPermissions(project, destCluster, func(proj string) ([]*v1alpha1.Cluster, error) {
return m.db.GetProjectClusters(context.TODO(), proj)
}, un, res)
if !project.IsGroupKindNamePermitted(un.GroupVersionKind().GroupKind(), un.GetName(), res.Namespaced) {
return fmt.Errorf("resource %s:%s is not permitted in project %s", un.GroupVersionKind().Group, un.GroupVersionKind().Kind, project.Name)
}
if res.Namespaced {
permitted, err := project.IsDestinationPermitted(destCluster, un.GetNamespace(), func(project string) ([]*v1alpha1.Cluster, error) {
return m.db.GetProjectClusters(context.TODO(), project)
})
if err != nil {
return err
}
if !permitted {
return fmt.Errorf("namespace %v is not permitted in project '%s'", un.GetNamespace(), project.Name)
}
}
return nil
}),
sync.WithOperationSettings(syncOp.DryRun, syncOp.Prune, syncOp.SyncStrategy.Force(), syncOp.IsApplyStrategy() || len(syncOp.Resources) > 0),
sync.WithInitialState(state.Phase, state.Message, initialResourcesRes, state.StartedAt),
@@ -592,33 +605,3 @@ func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application
// if there is no match found in the AppProject.Spec.DestinationServiceAccounts, use the default service account of the destination namespace.
return "", fmt.Errorf("no matching service account found for destination server %s and namespace %s", application.Spec.Destination.Server, serviceAccountNamespace)
}
// validateSyncPermissions checks whether the given resource is permitted by the project's
// allow/deny lists and destination rules. It returns an error if the API resource info is nil
// (preventing a nil-pointer panic), if the resource's group/kind is not permitted, or if
// the resource's namespace is not an allowed destination.
func validateSyncPermissions(
project *v1alpha1.AppProject,
destCluster *v1alpha1.Cluster,
getProjectClusters func(string) ([]*v1alpha1.Cluster, error),
un *unstructured.Unstructured,
res *metav1.APIResource,
) error {
// Without API resource info we cannot tell whether the resource is namespaced,
// so fail closed instead of dereferencing a nil pointer below.
if res == nil {
return fmt.Errorf("failed to get API resource info for %s/%s: unable to verify permissions", un.GroupVersionKind().Group, un.GroupVersionKind().Kind)
}
// Enforce the project's group/kind allow- and deny-lists.
if !project.IsGroupKindNamePermitted(un.GroupVersionKind().GroupKind(), un.GetName(), res.Namespaced) {
return fmt.Errorf("resource %s:%s is not permitted in project %s", un.GroupVersionKind().Group, un.GroupVersionKind().Kind, project.Name)
}
// The destination (namespace) check only applies to namespaced resources.
if res.Namespaced {
permitted, err := project.IsDestinationPermitted(destCluster, un.GetNamespace(), getProjectClusters)
if err != nil {
return err
}
if !permitted {
return fmt.Errorf("namespace %v is not permitted in project '%s'", un.GetNamespace(), project.Name)
}
}
return nil
}

View File

@@ -13,7 +13,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/controller/testdata"
@@ -1654,116 +1653,3 @@ func dig(obj any, path ...any) any {
return i
}
// TestValidateSyncPermissions covers validateSyncPermissions: the nil-APIResource
// guard, group/kind allow/deny enforcement, the namespace destination check for
// namespaced resources, and the skip of that check for cluster-scoped resources.
func TestValidateSyncPermissions(t *testing.T) {
t.Parallel()
// newResource builds a minimal unstructured object with the given GVK/name/namespace.
newResource := func(group, kind, name, namespace string) *unstructured.Unstructured {
obj := &unstructured.Unstructured{}
obj.SetGroupVersionKind(schema.GroupVersionKind{Group: group, Version: "v1", Kind: kind})
obj.SetName(name)
obj.SetNamespace(namespace)
return obj
}
// Project that only permits the "default" namespace on any server.
project := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "test-project",
Namespace: "argocd",
},
Spec: v1alpha1.AppProjectSpec{
Destinations: []v1alpha1.ApplicationDestination{
{Namespace: "default", Server: "*"},
},
},
}
destCluster := &v1alpha1.Cluster{
Server: "https://kubernetes.default.svc",
}
// noopGetClusters stands in for the project-cluster lookup; no clusters, no error.
noopGetClusters := func(_ string) ([]*v1alpha1.Cluster, error) {
return nil, nil
}
t.Run("nil APIResource returns error", func(t *testing.T) {
t.Parallel()
un := newResource("apps", "Deployment", "my-deploy", "default")
err := validateSyncPermissions(project, destCluster, noopGetClusters, un, nil)
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to get API resource info for apps/Deployment")
assert.Contains(t, err.Error(), "unable to verify permissions")
})
t.Run("permitted namespaced resource returns no error", func(t *testing.T) {
t.Parallel()
un := newResource("", "ConfigMap", "my-cm", "default")
res := &metav1.APIResource{Name: "configmaps", Namespaced: true}
err := validateSyncPermissions(project, destCluster, noopGetClusters, un, res)
assert.NoError(t, err)
})
t.Run("group kind not permitted returns error", func(t *testing.T) {
t.Parallel()
// Project with a cluster-resource denylist entry matching the resource below.
projectWithDenyList := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "restricted-project",
Namespace: "argocd",
},
Spec: v1alpha1.AppProjectSpec{
Destinations: []v1alpha1.ApplicationDestination{
{Namespace: "*", Server: "*"},
},
ClusterResourceBlacklist: []v1alpha1.ClusterResourceRestrictionItem{
{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
},
},
}
un := newResource("rbac.authorization.k8s.io", "ClusterRole", "my-role", "")
res := &metav1.APIResource{Name: "clusterroles", Namespaced: false}
err := validateSyncPermissions(projectWithDenyList, destCluster, noopGetClusters, un, res)
require.Error(t, err)
assert.Contains(t, err.Error(), "is not permitted in project")
})
t.Run("namespace not permitted returns error", func(t *testing.T) {
t.Parallel()
// kube-system is not in the project's destination list above.
un := newResource("", "ConfigMap", "my-cm", "kube-system")
res := &metav1.APIResource{Name: "configmaps", Namespaced: true}
err := validateSyncPermissions(project, destCluster, noopGetClusters, un, res)
require.Error(t, err)
assert.Contains(t, err.Error(), "namespace kube-system is not permitted in project")
})
t.Run("cluster-scoped resource skips namespace check", func(t *testing.T) {
t.Parallel()
// Cluster-scoped resources are allowed via the whitelist and must not be
// subject to the namespace destination check.
projectWithClusterResources := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "test-project",
Namespace: "argocd",
},
Spec: v1alpha1.AppProjectSpec{
Destinations: []v1alpha1.ApplicationDestination{
{Namespace: "default", Server: "*"},
},
ClusterResourceWhitelist: []v1alpha1.ClusterResourceRestrictionItem{
{Group: "*", Kind: "*"},
},
},
}
un := newResource("", "Namespace", "my-ns", "")
res := &metav1.APIResource{Name: "namespaces", Namespaced: false}
err := validateSyncPermissions(projectWithClusterResources, destCluster, noopGetClusters, un, res)
assert.NoError(t, err)
})
}

View File

@@ -21,8 +21,8 @@ These are the upcoming releases dates:
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](https://github.com/argoproj/argo-cd/issues/23347) |
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | [Michael Crenshaw](https://github.com/crenshaw-dev) | [checklist](https://github.com/argoproj/argo-cd/issues/24539) |
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | [Peter Jiang](https://github.com/pjiang-dev) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/25211) |
| v3.4 | Monday, Mar. 16, 2026 | Tuesday, May. 5, 2026 | [Codey Jenkins](https://github.com/FourFifthsCode) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/26527) |
| v3.5 | Tuesday, Jun. 16, 2026 | Tuesday, Aug. 4, 2026 | [Patroklos Papapetrou](https://github.com/ppapapetrou76) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/26746) |
| v3.4 | Monday, Mar. 16, 2026 | Monday, May. 4, 2026 | [Codey Jenkins](https://github.com/FourFifthsCode) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/26527) |
| v3.5 | Monday, Jun. 15, 2026 | Monday, Aug. 3, 2026 | [Patroklos Papapetrou](https://github.com/ppapapetrou76) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/26746) |
Actual release dates might differ from the plan by a few days.
@@ -36,10 +36,10 @@ effectively means that there is a seven-week feature freeze.
These are the approximate release dates:
* The first Tuesday of February
* The first Tuesday of May
* The first Tuesday of August
* The first Tuesday of November
* The first Monday of February
* The first Monday of May
* The first Monday of August
* The first Monday of November
Dates may be shifted slightly to accommodate holidays. Those shifts should be minimal.

View File

@@ -83,26 +83,6 @@ or a randomly generated password stored in a secret (Argo CD 1.9 and later).
Add `admin.enabled: "false"` to the `argocd-cm` ConfigMap
(see [user management](./operator-manual/user-management/index.md)).
## How to view orphaned resources?
Orphaned Kubernetes resources are top-level namespaced resources that do not belong to any Argo CD Application. For more information, see [Orphaned Resources Monitoring](./user-guide/orphaned-resources.md).
!!! warning
Enabling orphaned resource monitoring has performance implications. If an AppProject monitors a namespace containing many resources not managed by Argo CD (e.g. `kube-system`), it can significantly impact your Argo CD instance. Enable this feature only on projects with well-scoped namespaces.
To view orphaned resources in the Argo CD UI:
1. Click on **Settings** in the sidebar.
2. Click on **Projects**.
3. Select the desired project.
4. Scroll down to the **RESOURCE MONITORING** section.
5. Click **Edit** and enable the monitoring feature.
6. Check **Enable application warning conditions?** to enable warnings.
7. Click **Save**.
8. Navigate back to **Applications** and select an application under the configured project.
9. In the **Sync Panel**, under **APP CONDITIONS**, you will see the orphaned resources warning.
10. Click **Show Orphaned** below the **HEALTH STATUS** filters to display orphaned resources.
## Argo CD cannot deploy Helm Chart based applications without internet access, how can I solve it?
Argo CD might fail to generate Helm chart manifests if the chart has dependencies located in external repositories. To

View File

@@ -230,7 +230,7 @@ p, somerole, applicationsets, get, foo/bar/*, allow
### Using the CLI
You can use all existing Argo CD CLI commands for managing ApplicationSets in other namespaces, exactly as you would use the CLI to manage ApplicationSets in the control plane's namespace.
You can use all existing Argo CD CLI commands for managing applications in other namespaces, exactly as you would use the CLI to manage applications in the control plane's namespace.
For example, to retrieve the `ApplicationSet` named `foo` in the namespace `bar`, you can use the following CLI command:

View File

@@ -150,8 +150,6 @@ data:
server.api.content.types: "application/json"
# Number of webhook requests processed concurrently (default 50)
server.webhook.parallelism.limit: "50"
# Maximum number of compiled glob patterns to cache for RBAC evaluation (default 10000)
server.glob.cache.size: "10000"
# Whether to allow sync with replace checked to go through. Resource-level annotation to replace override this setting, i.e. it's only enforced on the API server level.
server.sync.replace.allowed: "true"

View File

@@ -253,11 +253,6 @@ spec:
megabytes.
The default value is 200. You might need to increase this for an Argo CD instance that manages 3000+ applications.
* The `server.glob.cache.size` config key in `argocd-cmd-params-cm` (or the `--glob-cache-size` server flag) controls
the maximum number of compiled glob patterns cached for RBAC policy evaluation. Glob pattern compilation is expensive,
and caching significantly improves RBAC performance when many applications are managed. The default value is 10000.
See [RBAC Glob Matching](rbac.md#glob-matching) for more details.
### argocd-dex-server, argocd-redis
The `argocd-dex-server` uses an in-memory database, and two or more instances may have inconsistent data.

View File

@@ -321,10 +321,6 @@ When the `example-user` executes the `extensions/DaemonSet/test` action, the fol
3. The value `action/extensions/DaemonSet/test` matches `action/extensions/*`. Note that `/` is not treated as a separator and the use of `**` is not necessary.
4. The value `default/my-app` matches `default/*`.
> [!TIP]
> For performance tuning of glob pattern matching, see the `server.glob.cache.size` config key in
> [High Availability - argocd-server](high_availability.md#argocd-server).
## Using SSO Users/Groups
The `scopes` field controls which OIDC scopes to examine during RBAC enforcement (in addition to `sub` scope).

View File

@@ -22,7 +22,6 @@ argocd-applicationset-controller [flags]
--client-certificate string Path to a client certificate file for TLS
--client-key string Path to a client key file for TLS
--cluster string The name of the kubeconfig cluster to use
--concurrent-application-updates int Number of concurrent Application create/update/delete operations per ApplicationSet reconcile. (default 1)
--concurrent-reconciliations int Max concurrent reconciliations limit for the controller (default 10)
--context string The name of the kubeconfig context to use
--debug Print debug logs. Takes precedence over loglevel

View File

@@ -54,7 +54,6 @@ argocd-server [flags]
--enable-gzip Enable GZIP compression (default true)
--enable-k8s-event none Enable ArgoCD to use k8s event. For disabling all events, set the value as none. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated) (default [all])
--enable-proxy-extension Enable Proxy Extension feature
--glob-cache-size int Maximum number of compiled glob patterns to cache for RBAC evaluation (default 10000)
--gloglevel int Set the glog logging level
-h, --help help for argocd-server
--hydrator-enabled Feature flag to enable Hydrator. Default ("false")

View File

@@ -1,2 +1,5 @@
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
version.
| Argo CD version | Kubernetes versions |
|-----------------|---------------------|
| 3.4 | v1.35, v1.34, v1.33, v1.32 |
| 3.3 | v1.34, v1.33, v1.32, v1.31 |
| 3.2 | v1.34, v1.33, v1.32, v1.31 |

View File

@@ -90,241 +90,6 @@ source:
ignoreMissingValueFiles: true
```
## Glob Patterns in Value Files
Glob patterns can be used in `valueFiles` entries to match multiple files at once. This is useful
when the set of environment-specific override files is not known in advance, or when you want to
pick up new files automatically without updating the Application spec.
```bash
# Single quotes prevent the shell from expanding the glob before Argo CD receives it
argocd app set helm-guestbook --values 'envs/*.yaml'
```
In the declarative syntax:
```yaml
source:
helm:
valueFiles:
- envs/*.yaml
```
### Supported pattern syntax
Glob expansion uses the [doublestar](https://github.com/bmatcuk/doublestar) library.
| Pattern | Description |
|---------|-------------|
| `*` | Matches any sequence of non-separator characters within a single directory level |
| `?` | Matches any single non-separator character |
| `[abc]` | Matches one of the characters listed inside the brackets |
| `[a-z]` | Matches any character in the given range |
| `**` | Matches any sequence of characters including `/` (recursive across directory levels) |
### How files are passed to Helm
Each matched file is passed to `helm template` as a separate `--values <path>` flag, in the same
order they appear after expansion. This is identical to listing each file individually in
`valueFiles`. Argo CD does the expansion before invoking Helm.
Matched files are expanded **in-place** within the `valueFiles` list and sorted in **lexical
(alphabetical) order**. Because Helm gives higher precedence to later `--values` flags, lexical
order determines which file wins when the same key appears in multiple files.
```
envs/
a.yaml # sets foo: a-value
b.yaml # sets foo: b-value
```
```yaml
# envs/*.yaml expands to: envs/a.yaml, envs/b.yaml (lexical order)
# b.yaml is last → foo = "b-value"
source:
helm:
valueFiles:
- envs/*.yaml
```
When you have multiple entries in `valueFiles`, the relative order between entries is preserved.
Glob expansion only reorders files within a single pattern:
```yaml
valueFiles:
- base.yaml # passed first
- overrides/*.yaml # expanded in lexical order, passed after base.yaml
- final.yaml # passed last, highest precedence
```
### Recursive matching with `**`
Use `**` to match files at any depth below a directory:
```yaml
# envs/**/*.yaml processes each directory's own files before descending into subdirectories,
# with directories and files sorted alphabetically at each level.
#
# envs/a.yaml ← 'a' (flat file in envs/)
# envs/z.yaml ← 'z' (flat file in envs/, processed before descending)
# envs/nested/c.yaml ← inside envs/nested/, processed after envs/ flat files
#
# nested/c.yaml is last → foo = "nested-value"
source:
helm:
valueFiles:
- envs/**/*.yaml
```
> [!NOTE]
> `**` matches zero or more path segments, so `envs/**/*.yaml` also matches files directly
> inside `envs/` (not just subdirectories). doublestar traverses directories in lexical order
> and processes each directory's own files (alphabetically) before descending into its
> subdirectories. This means `envs/z.yaml` always comes before `envs/nested/c.yaml`, even
> though `'n' < 'z'` alphabetically. To make ordering fully explicit and predictable,
> use numeric prefixes (see [Naming conventions](#naming-conventions)).
### Using environment variables in glob patterns
[Build environment variables](./build-environment.md) are substituted **before** the glob is
evaluated, so you can construct patterns dynamically:
```yaml
source:
helm:
valueFiles:
- envs/$ARGOCD_APP_NAME/*.yaml
```
This lets a single Application template expand to the right set of files per app name.
### Glob patterns with multiple sources
Glob patterns work with [value files from an external repository](./multiple_sources.md#helm-value-files-from-external-git-repository).
The `$ref` variable is resolved first to the external repo's root, and the rest of the pattern is
evaluated within that repo's directory tree:
```yaml
sources:
- repoURL: https://git.example.com/my-configs.git
ref: configs
- repoURL: https://git.example.com/my-chart.git
path: chart
helm:
valueFiles:
- $configs/envs/*.yaml # matches files in the 'my-configs' repo under envs/
```
### Naming conventions
Because files are sorted lexically, the sort order controls merge precedence. A common pattern is
to use a numeric prefix to make the intended order explicit:
```
values/
00-defaults.yaml
10-region.yaml
20-env.yaml
30-override.yaml
```
```yaml
valueFiles:
- values/*.yaml
# expands to: 00-defaults.yaml, 10-region.yaml, 20-env.yaml, 30-override.yaml
# 30-override.yaml has the highest precedence
```
Without a prefix, pure alphabetical ordering applies. Be careful with names that sort
unexpectedly, for example `values-10.yaml` sorts before `values-9.yaml` because `"1"` < `"9"`
lexically.
### Constraints and limitations
**Path boundary**: Glob patterns cannot match files outside the repository root, even with
patterns like `../../secrets/*.yaml`. Argo CD resolves the pattern's base path against the
repository root before expanding it, and any match that would escape the root is rejected.
**Symlinks**: Argo CD follows symlinks when checking the path boundary. A symlink that lives
inside the repository but points to a target outside the repository root is rejected, even though
the symlink's own path is within the repo. This check applies to every file produced by glob
expansion, including multi-hop symlink chains. Symlinks that resolve to a target still inside the
repository are allowed.
**Absolute paths**: A path starting with `/` is treated as relative to the **repository root**,
not the filesystem root. The pattern `/configs/*.yaml` matches files in the `configs/` directory
at the top of the repository.
**Remote URLs are not glob-expanded**: Entries that are remote URLs (e.g.
`https://raw.githubusercontent.com/.../values.yaml`) are passed to Helm as-is. Glob characters
in a URL have no special meaning; if the literal characters do not form a valid, existing URL,
fetching the values file will fail.
**Shell quoting on the CLI**: Shells expand glob patterns before passing arguments to programs.
Always quote patterns to prevent unintended shell expansion:
```bash
# Correct: single quotes pass the literal pattern to Argo CD
argocd app set myapp --values 'envs/*.yaml'
# Incorrect: the shell expands *.yaml against the current directory first
argocd app set myapp --values envs/*.yaml
```
### Deduplication
Each file is included only once, but **explicit entries take priority over glob matches** when
determining position. If a file appears both in a glob pattern and as an explicit entry, the glob
skips it and the explicit entry places it at its declared position.
```yaml
valueFiles:
- envs/*.yaml # expands to base.yaml, prod.yaml — but prod.yaml is listed explicitly below,
# so the glob skips it: only base.yaml is added here
- envs/prod.yaml # placed here at the end, giving it highest Helm precedence
```
This means you can use a glob to pick up all files in a directory and then pin a specific file to
the end (highest precedence) by listing it explicitly after the glob.
If the same file (same absolute path) is matched by two glob patterns, it is included at the
position of the first match. Subsequent glob matches for that exact path are silently dropped.
Files with the same name but at different paths are treated as distinct files and are always included.
```yaml
valueFiles:
- envs/*.yaml # matches envs/base.yaml, envs/prod.yaml
- envs/**/*.yaml # envs/prod.yaml already matched above and is skipped;
# envs/nested/prod.yaml is a different path and is still included
```
### No-match behavior
If a glob pattern matches no files, Argo CD still saves the Application spec (the spec itself is
valid, and the matching files may be added to the repository later), but it surfaces a
`ComparisonError` condition on the Application:
```
values file glob "nonexistent/*.yaml" matched no files
```
The app will remain in a degraded state until the pattern matches at least one file or the pattern
is removed. No spec update is required once the files are added to the repository.
To silently skip a pattern that matches no files instead of raising an error, combine the glob with
`ignoreMissingValueFiles`:
```yaml
source:
helm:
valueFiles:
- envs/*.yaml
ignoreMissingValueFiles: true
```
This is useful for implementing a default/override pattern where override files may not exist in
every environment.
## Values
Argo CD supports the equivalent of a values file directly in the Application manifest using the `source.helm.valuesObject` key.

View File

@@ -1,9 +1,5 @@
# Orphaned Resources Monitoring
!!! warning
Enabling orphaned resource monitoring has performance implications. If an AppProject monitors a namespace containing many resources not managed by Argo CD (e.g. `kube-system`), it can significantly impact your Argo CD instance. Enable this feature only on projects with well-scoped namespaces.
An [orphaned Kubernetes resource](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#orphaned-dependents) is a top-level namespaced resource that does not belong to any Argo CD Application. The Orphaned Resources Monitoring feature allows detecting
orphaned resources, inspecting/removing resources using the Argo CD UI, and generating a warning.
@@ -42,10 +38,10 @@ Not every resource in the Kubernetes cluster is controlled by the end user and m
The following resources are never considered orphaned:
- Namespaced resources denied in the project. Usually, such resources are managed by cluster administrators and are not supposed to be modified by a namespace user.
- `ServiceAccount` with the name `default` (and the corresponding auto-generated `ServiceAccountToken`).
- `Service` with the name `kubernetes` in the `default` namespace.
- `ConfigMap` with the name `kube-root-ca.crt` in all namespaces.
* Namespaced resources denied in the project. Usually, such resources are managed by cluster administrators and are not supposed to be modified by a namespace user.
* `ServiceAccount` with the name `default` (and the corresponding auto-generated `ServiceAccountToken`).
* `Service` with the name `kubernetes` in the `default` namespace.
* `ConfigMap` with the name `kube-root-ca.crt` in all namespaces.
You can prevent resources from being declared orphaned by providing a list of ignore rules, each defining a Group, Kind, and Name.
@@ -53,8 +49,8 @@ You can prevent resources from being declared orphaned by providing a list of ig
spec:
orphanedResources:
ignore:
- kind: ConfigMap
name: orphaned-but-ignored-configmap
- kind: ConfigMap
name: orphaned-but-ignored-configmap
```
The `name` can be a [glob pattern](https://github.com/gobwas/glob), e.g.:

View File

@@ -92,15 +92,6 @@ const (
RespectRbacStrict
)
// callState tracks whether action() has been called on a resource during hierarchy iteration.
type callState int
const (
notCalled callState = iota // action() has not been called yet
inProgress // action() is currently being processed (in call stack)
completed // action() has been called and processing is complete
)
type apiMeta struct {
namespaced bool
// watchCancel stops the watch of all resources for this API. This gets called when the cache is invalidated or when
@@ -1195,11 +1186,8 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
c.lock.RLock()
defer c.lock.RUnlock()
// Track whether action() has been called on each resource (notCalled/inProgress/completed).
// This is shared across processNamespaceHierarchy and processCrossNamespaceChildren.
// Note: This is distinct from 'crossNSTraversed' in processCrossNamespaceChildren, which tracks
// whether we've traversed a cluster-scoped key's cross-namespace children.
actionCallState := make(map[kube.ResourceKey]callState)
// Track visited resources to avoid cycles
visited := make(map[kube.ResourceKey]int)
// Group keys by namespace for efficient processing
keysPerNamespace := make(map[string][]kube.ResourceKey)
@@ -1215,18 +1203,12 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
for namespace, namespaceKeys := range keysPerNamespace {
nsNodes := c.nsIndex[namespace]
graph := buildGraph(nsNodes)
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, actionCallState, action)
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, visited, action)
}
// Process pre-computed cross-namespace children
if clusterKeys, ok := keysPerNamespace[""]; ok {
// Track which cluster-scoped keys have had their cross-namespace children traversed.
// This is distinct from 'actionCallState' - a resource may have had action() called
// (i.e., its actionCallState is in the completed state) but not yet had its cross-namespace
// children traversed. This prevents infinite recursion when resources have circular
// ownerReferences.
crossNSTraversed := make(map[kube.ResourceKey]bool)
c.processCrossNamespaceChildren(clusterKeys, actionCallState, crossNSTraversed, action)
c.processCrossNamespaceChildren(clusterKeys, visited, action)
}
}
@@ -1234,21 +1216,12 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries.
// It also handles multi-level hierarchies where cluster-scoped resources own other cluster-scoped resources
// that in turn own namespaced resources (e.g., Provider -> ProviderRevision -> Deployment in Crossplane).
// The crossNSTraversed map tracks which keys have already been processed to prevent infinite recursion
// from circular ownerReferences (e.g., a resource that owns itself).
func (c *clusterCache) processCrossNamespaceChildren(
clusterScopedKeys []kube.ResourceKey,
actionCallState map[kube.ResourceKey]callState,
crossNSTraversed map[kube.ResourceKey]bool,
visited map[kube.ResourceKey]int,
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
for _, clusterKey := range clusterScopedKeys {
// Skip if already processed (cycle detection)
if crossNSTraversed[clusterKey] {
continue
}
crossNSTraversed[clusterKey] = true
// Get cluster-scoped resource to access its UID
clusterResource := c.resources[clusterKey]
if clusterResource == nil {
@@ -1263,17 +1236,16 @@ func (c *clusterCache) processCrossNamespaceChildren(
continue
}
alreadyProcessed := actionCallState[childKey] != notCalled
alreadyVisited := visited[childKey] != 0
// If child is cluster-scoped and action() was already called by processNamespaceHierarchy,
// If child is cluster-scoped and was already visited by processNamespaceHierarchy,
// we still need to recursively check for its cross-namespace children.
// This handles multi-level hierarchies like: ClusterScoped -> ClusterScoped -> Namespaced
// (e.g., Crossplane's Provider -> ProviderRevision -> Deployment)
if alreadyProcessed {
if alreadyVisited {
if childKey.Namespace == "" {
// Recursively process cross-namespace children of this cluster-scoped child
// The crossNSTraversed map prevents infinite recursion on circular ownerReferences
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, actionCallState, crossNSTraversed, action)
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
}
continue
}
@@ -1286,16 +1258,16 @@ func (c *clusterCache) processCrossNamespaceChildren(
// Process this child
if action(child, nsNodes) {
actionCallState[childKey] = inProgress
visited[childKey] = 1
// Recursively process descendants using index-based traversal
c.iterateChildrenUsingIndex(child, nsNodes, actionCallState, action)
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
// If this child is also cluster-scoped, recursively process its cross-namespace children
if childKey.Namespace == "" {
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, actionCallState, crossNSTraversed, action)
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
}
actionCallState[childKey] = completed
visited[childKey] = 2
}
}
}
@@ -1306,14 +1278,14 @@ func (c *clusterCache) processCrossNamespaceChildren(
func (c *clusterCache) iterateChildrenUsingIndex(
parent *Resource,
nsNodes map[kube.ResourceKey]*Resource,
actionCallState map[kube.ResourceKey]callState,
visited map[kube.ResourceKey]int,
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
// Look up direct children of this parent using the index
childKeys := c.parentUIDToChildren[parent.Ref.UID]
for _, childKey := range childKeys {
if actionCallState[childKey] != notCalled {
continue // action() already called or in progress
if visited[childKey] != 0 {
continue // Already visited or in progress
}
child := c.resources[childKey]
@@ -1328,10 +1300,10 @@ func (c *clusterCache) iterateChildrenUsingIndex(
}
if action(child, nsNodes) {
actionCallState[childKey] = inProgress
visited[childKey] = 1
// Recursively process this child's descendants
c.iterateChildrenUsingIndex(child, nsNodes, actionCallState, action)
actionCallState[childKey] = completed
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
visited[childKey] = 2
}
}
}
@@ -1341,19 +1313,22 @@ func (c *clusterCache) processNamespaceHierarchy(
namespaceKeys []kube.ResourceKey,
nsNodes map[kube.ResourceKey]*Resource,
graph map[kube.ResourceKey]map[types.UID]*Resource,
actionCallState map[kube.ResourceKey]callState,
visited map[kube.ResourceKey]int,
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
for _, key := range namespaceKeys {
visited[key] = 0
}
for _, key := range namespaceKeys {
res := c.resources[key]
if actionCallState[key] == completed || !action(res, nsNodes) {
if visited[key] == 2 || !action(res, nsNodes) {
continue
}
actionCallState[key] = inProgress
visited[key] = 1
if _, ok := graph[key]; ok {
for _, child := range graph[key] {
if actionCallState[child.ResourceKey()] == notCalled && action(child, nsNodes) {
child.iterateChildrenV2(graph, nsNodes, actionCallState, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
if err != nil {
c.log.V(2).Info(err.Error())
return false
@@ -1363,7 +1338,7 @@ func (c *clusterCache) processNamespaceHierarchy(
}
}
}
actionCallState[key] = completed
visited[key] = 2
}
}

View File

@@ -2189,112 +2189,3 @@ func TestIterateHierarchyV2_NoDuplicatesCrossNamespace(t *testing.T) {
assert.Equal(t, 1, visitCount["namespaced-child"], "namespaced child should be visited once")
assert.Equal(t, 1, visitCount["cluster-child"], "cluster child should be visited once")
}
func TestIterateHierarchyV2_CircularOwnerReference_NoStackOverflow(t *testing.T) {
// Test that self-referencing resources (circular ownerReferences) don't cause stack overflow.
// This reproduces the bug reported in https://github.com/argoproj/argo-cd/issues/26783
// where a resource with an ownerReference pointing to itself caused infinite recursion.
// Create a cluster-scoped resource that owns itself (self-referencing)
selfReferencingResource := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: "self-referencing",
UID: "self-ref-uid",
ResourceVersion: "1",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Namespace",
Name: "self-referencing",
UID: "self-ref-uid", // Points to itself
}},
},
}
cluster := newCluster(t, selfReferencingResource).WithAPIResources([]kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
Meta: metav1.APIResource{Namespaced: false},
}})
err := cluster.EnsureSynced()
require.NoError(t, err)
visitCount := 0
// This should complete without stack overflow
cluster.IterateHierarchyV2(
[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(selfReferencingResource))},
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
visitCount++
return true
},
)
// The self-referencing resource should be visited exactly once
assert.Equal(t, 1, visitCount, "self-referencing resource should be visited exactly once")
}
func TestIterateHierarchyV2_CircularOwnerChain_NoStackOverflow(t *testing.T) {
// Test that circular ownership chains (A -> B -> A) don't cause stack overflow.
// This is a more complex case where two resources own each other.
resourceA := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: "resource-a",
UID: "uid-a",
ResourceVersion: "1",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Namespace",
Name: "resource-b",
UID: "uid-b", // A is owned by B
}},
},
}
resourceB := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: "resource-b",
UID: "uid-b",
ResourceVersion: "1",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Namespace",
Name: "resource-a",
UID: "uid-a", // B is owned by A
}},
},
}
cluster := newCluster(t, resourceA, resourceB).WithAPIResources([]kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
Meta: metav1.APIResource{Namespaced: false},
}})
err := cluster.EnsureSynced()
require.NoError(t, err)
visitCount := make(map[string]int)
// This should complete without stack overflow
cluster.IterateHierarchyV2(
[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(resourceA))},
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
visitCount[resource.Ref.Name]++
return true
},
)
// Each resource in the circular chain should be visited exactly once
assert.Equal(t, 1, visitCount["resource-a"], "resource-a should be visited exactly once")
assert.Equal(t, 1, visitCount["resource-b"], "resource-b should be visited exactly once")
}

View File

@@ -563,7 +563,7 @@ func (_c *ClusterCache_IsNamespaced_Call) RunAndReturn(run func(gk schema.GroupK
return _c
}
// IterateHierarchyV2 provides a mock function for the type ClusterCache
// IterateHierarchyV2 provides a mock function with given fields: keys, action, orphanedResourceNamespace
func (_mock *ClusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *cache.Resource, namespaceResources map[kube.ResourceKey]*cache.Resource) bool) {
_mock.Called(keys, action)
return

View File

@@ -76,16 +76,16 @@ func (r *Resource) toOwnerRef() metav1.OwnerReference {
}
// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, actionCallState map[kube.ResourceKey]callState, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
key := r.ResourceKey()
if actionCallState[key] == completed {
if visited[key] == 2 {
return
}
// this indicates that we've started processing this node's children
actionCallState[key] = inProgress
visited[key] = 1
defer func() {
// this indicates that we've finished processing this node's children
actionCallState[key] = completed
visited[key] = 2
}()
children, ok := graph[key]
if !ok || children == nil {
@@ -94,13 +94,13 @@ func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*R
for _, child := range children {
childKey := child.ResourceKey()
// For cross-namespace relationships, child might not be in ns, so use it directly from graph
switch actionCallState[childKey] {
case inProgress:
switch visited[childKey] {
case 1:
// Since we encountered a node that we're currently processing, we know we have a circular dependency.
_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
case notCalled:
case 0:
if action(nil, child, ns) {
child.iterateChildrenV2(graph, ns, actionCallState, action)
child.iterateChildrenV2(graph, ns, visited, action)
}
}
}

View File

@@ -57,14 +57,14 @@ func TestAuthReconcileWithMissingNamespace(t *testing.T) {
_, err := k.authReconcile(context.Background(), role, "/dev/null", cmdutil.DryRunNone)
assert.Error(t, err)
assert.True(t, errors.IsNotFound(err), "returned error should be resource not found")
assert.True(t, errors.IsNotFound(err), "returned error wasn't not found")
roleBinding := testingutils.NewRoleBinding()
roleBinding.SetNamespace(namespace)
_, err = k.authReconcile(context.Background(), roleBinding, "/dev/null", cmdutil.DryRunNone)
assert.Error(t, err)
assert.True(t, errors.IsNotFound(err), "returned error should be resource not found")
assert.True(t, errors.IsNotFound(err), "returned error wasn't not found")
clusterRole := testingutils.NewClusterRole()
clusterRole.SetNamespace(namespace)

20
go.mod
View File

@@ -45,7 +45,6 @@ require (
github.com/gogits/go-gogs-client v0.0.0-20210131175652-1d7215cd8d85
github.com/gogo/protobuf v1.3.2
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8
github.com/golang/protobuf v1.5.4
github.com/google/btree v1.1.3
github.com/google/gnostic-models v0.7.0 // indirect
@@ -103,7 +102,7 @@ require (
golang.org/x/term v0.41.0
golang.org/x/time v0.15.0
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57
google.golang.org/grpc v1.79.3
google.golang.org/grpc v1.79.2
google.golang.org/protobuf v1.36.11
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -149,18 +148,18 @@ require (
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20240116134246-a8cbe886bab0 // indirect
github.com/aws/aws-sdk-go-v2 v1.41.4
github.com/aws/aws-sdk-go-v2/config v1.32.11
github.com/aws/aws-sdk-go-v2/credentials v1.19.12
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.11
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.9
github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.8
github.com/aws/smithy-go v1.24.2
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -209,6 +208,7 @@ require (
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/glog v1.2.5 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/go-querystring v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect

36
go.sum
View File

@@ -128,10 +128,10 @@ github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1h
github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
github.com/aws/aws-sdk-go-v2/config v1.32.11 h1:ftxI5sgz8jZkckuUHXfC/wMUc8u3fG1vQS0plr2F2Zs=
github.com/aws/aws-sdk-go-v2/config v1.32.11/go.mod h1:twF11+6ps9aNRKEDimksp923o44w/Thk9+8YIlzWMmo=
github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE=
github.com/aws/aws-sdk-go-v2/credentials v1.19.11 h1:NdV8cwCcAXrCWyxArt58BrvZJ9pZ9Fhf9w6Uh5W3Uyc=
github.com/aws/aws-sdk-go-v2/credentials v1.19.11/go.mod h1:30yY2zqkMPdrvxBqzI9xQCM+WrlrZKSOpSJEsylVU+8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 h1:INUvJxmhdEbVulJYHI061k4TVuS3jzzthNvjqvVvTKM=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19/go.mod h1:FpZN2QISLdEBWkayloda+sZjVJL+e9Gl0k1SyTgcswU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw=
@@ -140,22 +140,22 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 h1:clHU5fm//kWS1C2HgtgWxfQbFbx4
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY=
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.11 h1:R3S5odXTsflG7xUp9S2AsewSXtQi1LBd+stJ5OpCIog=
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.11/go.mod h1:OekzWXyZi3ptl+YoKmm+G5ODIa4BDEArvZv8gHrQb5s=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 h1:XAq62tBTJP/85lFD5oqOOe7YYgWxY9LvWq8plyDvDVg=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7suZk9UCJHE1Iw9GMZJJl0dAnKXXP1NaSDHwmw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA=
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.8 h1:mGgiunl7ZwOwhpJwJNF4JfsZFYJp08wjyS3NqFQe3ws=
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.8/go.mod h1:KdM2EhXeHfeBQz5keOvv/FM7kbesjCWm7HEEyJe3frs=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.8/go.mod h1:LXypKvk85AROkKhOG6/YEcHFPoX+prKTowKnVdcaIxE=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 h1:Y2cAXlClHsXkkOvWZFXATr34b0hxxloeQu/pAZz2row=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.7/go.mod h1:idzZ7gmDeqeNrSPkdbtMp9qWMgcBwykA7P7Rzh5DXVU=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1 h1:ZtgZeMPJH8+/vNs9vJFFLI0QEzYbcN0p7x1/FFwyROc=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1/go.mod h1:Bar4MrRxeqdn6XIh8JGfiXuFRmyrrsZNTJotxEJmWW0=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 h1:kiIDLZ005EcKomYYITtfsjn7dtOwHDOFy7IbPXKek2o=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.13/go.mod h1:2h/xGEowcW/g38g06g3KpRWDlT+OTfxxI0o1KqayAB8=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 h1:jzKAXIlhZhJbnYwHbvUQZEB8KfgAEuG0dc08Bkda7NU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 h1:iSsvB9EtQ09YrsmIc44Heqlx5ByGErqhPK1ZQLppias=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.12/go.mod h1:fEWYKTRGoZNl8tZ77i61/ccwOMJdGxwOhWCkp6TXAr0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 h1:EnUdUqRP1CNzt2DkV67tJx6XDN4xlfBFm+bzeNOQVb0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16/go.mod h1:Jic/xv0Rq/pFNCh3WwpH4BEqdbSAl+IyHro8LbibHD8=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 h1:XQTQTF75vnug2TXS8m7CVJfC2nniYPZnO1D4Np761Oo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.8/go.mod h1:Xgx+PR1NUOjNmQY+tRMnouRp83JRM8pRMw/vCaVhPkI=
github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=
github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
@@ -1404,8 +1404,8 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU=
google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=

View File

@@ -1 +0,0 @@
580515b544d5c966edc6f782c9ae88e21a9e10c786a7d6c5fd4b52613f321076 helm-v3.20.1-darwin-amd64.tar.gz

View File

@@ -1 +0,0 @@
75cc96ac3fe8b8b9928eb051e55698e98d1e026967b6bffe4f0f3c538a551b65 helm-v3.20.1-darwin-arm64.tar.gz

View File

@@ -1 +0,0 @@
0165ee4a2db012cc657381001e593e981f42aa5707acdd50658326790c9d0dc3 helm-v3.20.1-linux-amd64.tar.gz

View File

@@ -1 +0,0 @@
56b9d1b0e0efbb739be6e68a37860ace8ec9c7d3e6424e3b55d4c459bc3a0401 helm-v3.20.1-linux-arm64.tar.gz

View File

@@ -1 +0,0 @@
77b7d9bc62b209c044b873bc773055c5c0d17ef055e54c683f33209ebbe8883c helm-v3.20.1-linux-ppc64le.tar.gz

View File

@@ -1 +0,0 @@
3c43d45149a425c7bf15ba3653ddee13e7b1a4dd6d4534397b6f317f83c51b58 helm-v3.20.1-linux-s390x.tar.gz

View File

@@ -11,7 +11,7 @@
# Use ./hack/installers/checksums/add-helm-checksums.sh and
# add-kustomize-checksums.sh to help download checksums.
###############################################################################
helm3_version=3.20.1
helm3_version=3.19.4
kustomize5_version=5.8.1
protoc_version=29.3
oras_version=1.2.0

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v3.4.0-rc1

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v3.4.0-rc1
resources:
- ./application-controller
- ./dex

View File

@@ -316,12 +316,6 @@ spec:
name: argocd-cmd-params-cm
key: server.webhook.parallelism.limit
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
name: argocd-cmd-params-cm
key: server.glob.cache.size
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:

View File

@@ -31332,7 +31332,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31473,7 +31473,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31601,7 +31601,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -31910,7 +31910,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31963,7 +31963,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32366,7 +32366,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -31300,7 +31300,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31429,7 +31429,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -31738,7 +31738,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31791,7 +31791,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32194,7 +32194,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v3.4.0-rc1

View File

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: latest
newTag: v3.4.0-rc1
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

View File

@@ -32758,7 +32758,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -32899,7 +32899,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33057,7 +33057,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -33159,7 +33159,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -33283,7 +33283,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -33618,7 +33618,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33671,7 +33671,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -34058,12 +34058,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -34106,7 +34100,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -34538,7 +34532,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -32728,7 +32728,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -32887,7 +32887,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -32989,7 +32989,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -33113,7 +33113,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -33448,7 +33448,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33501,7 +33501,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -33888,12 +33888,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -33936,7 +33930,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -34368,7 +34362,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -2005,7 +2005,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2146,7 +2146,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2304,7 +2304,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2406,7 +2406,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2530,7 +2530,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2865,7 +2865,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2918,7 +2918,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3305,12 +3305,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -3353,7 +3347,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3785,7 +3779,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1975,7 +1975,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2134,7 +2134,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2236,7 +2236,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2360,7 +2360,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2695,7 +2695,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2748,7 +2748,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3135,12 +3135,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -3183,7 +3177,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3615,7 +3609,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -31776,7 +31776,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31917,7 +31917,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32075,7 +32075,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -32177,7 +32177,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -32279,7 +32279,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -32588,7 +32588,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32641,7 +32641,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -33026,12 +33026,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -33074,7 +33068,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -33506,7 +33500,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

22
manifests/install.yaml generated
View File

@@ -31744,7 +31744,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31903,7 +31903,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -32005,7 +32005,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -32107,7 +32107,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -32416,7 +32416,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32469,7 +32469,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32854,12 +32854,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -32902,7 +32896,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -33334,7 +33328,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1023,7 +1023,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1164,7 +1164,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1322,7 +1322,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1424,7 +1424,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1526,7 +1526,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1835,7 +1835,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1888,7 +1888,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2273,12 +2273,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -2321,7 +2315,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2753,7 +2747,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -991,7 +991,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1150,7 +1150,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1252,7 +1252,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1354,7 +1354,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1663,7 +1663,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1716,7 +1716,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2101,12 +2101,6 @@ spec:
key: server.webhook.parallelism.limit
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_SERVER_GLOB_CACHE_SIZE
valueFrom:
configMapKeyRef:
key: server.glob.cache.size
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING
valueFrom:
configMapKeyRef:
@@ -2149,7 +2143,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2581,7 +2575,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:latest
image: quay.io/argoproj/argocd:v3.4.0-rc1
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -15,13 +15,10 @@ import (
"regexp"
"sort"
"strings"
gosync "sync"
"time"
"github.com/TomOnTime/utfutil"
"github.com/bmatcuk/doublestar/v4"
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
gocache "github.com/patrickmn/go-cache"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-cd/v3/util/oci"
@@ -98,8 +95,6 @@ type Service struct {
newGitClient func(rawRepoURL string, root string, creds git.Creds, insecure bool, enableLfs bool, proxy string, noProxy string, opts ...git.ClientOpts) (git.Client, error)
newHelmClient func(repoURL string, creds helm.Creds, enableOci bool, proxy string, noProxy string, opts ...helm.ClientOpts) helm.Client
initConstants RepoServerInitConstants
// stores cached symlink validation results
symlinksState *gocache.Cache
// now is usually just time.Now, but may be replaced by unit tests for testing purposes
now func() time.Time
}
@@ -161,7 +156,6 @@ func NewService(metricsServer *metrics.MetricsServer, cache *cache.Cache, initCo
ociPaths: ociRandomizedPaths,
gitRepoInitializer: directoryPermissionInitializer,
rootDir: rootDir,
symlinksState: gocache.New(12*time.Hour, time.Hour),
}
}
@@ -401,7 +395,7 @@ func (s *Service) runRepoOperation(
defer utilio.Close(closer)
if !s.initConstants.AllowOutOfBoundsSymlinks {
err := s.checkOutOfBoundsSymlinks(ociPath, revision, settings.noCache)
err := apppathutil.CheckOutOfBoundsSymlinks(ociPath)
if err != nil {
oobError := &apppathutil.OutOfBoundsSymlinkError{}
if errors.As(err, &oobError) {
@@ -442,7 +436,7 @@ func (s *Service) runRepoOperation(
}
defer utilio.Close(closer)
if !s.initConstants.AllowOutOfBoundsSymlinks {
err := s.checkOutOfBoundsSymlinks(chartPath, revision, settings.noCache)
err := apppathutil.CheckOutOfBoundsSymlinks(chartPath)
if err != nil {
oobError := &apppathutil.OutOfBoundsSymlinkError{}
if errors.As(err, &oobError) {
@@ -471,7 +465,7 @@ func (s *Service) runRepoOperation(
defer utilio.Close(closer)
if !s.initConstants.AllowOutOfBoundsSymlinks {
err := s.checkOutOfBoundsSymlinks(gitClient.Root(), revision, settings.noCache, ".git")
err := apppathutil.CheckOutOfBoundsSymlinks(gitClient.Root())
if err != nil {
oobError := &apppathutil.OutOfBoundsSymlinkError{}
if errors.As(err, &oobError) {
@@ -595,25 +589,6 @@ func resolveReferencedSources(hasMultipleSources bool, source *v1alpha1.Applicat
return repoRefs, nil
}
// checkOutOfBoundsSymlinks validates symlinks and caches validation result in memory
func (s *Service) checkOutOfBoundsSymlinks(rootPath string, version string, noCache bool, skipPaths ...string) error {
key := rootPath + "/" + version + "/" + strings.Join(skipPaths, ",")
ok := false
var checker any
if !noCache {
checker, ok = s.symlinksState.Get(key)
}
if !ok {
checker = gosync.OnceValue(func() error {
return apppathutil.CheckOutOfBoundsSymlinks(rootPath, skipPaths...)
})
s.symlinksState.Set(key, checker, gocache.DefaultExpiration)
}
return checker.(func() error)()
}
func (s *Service) GenerateManifest(ctx context.Context, q *apiclient.ManifestRequest) (*apiclient.ManifestResponse, error) {
var res *apiclient.ManifestResponse
var err error
@@ -681,13 +656,6 @@ func (s *Service) GenerateManifest(ctx context.Context, q *apiclient.ManifestReq
return nil, err
}
}
// Convert typed errors to gRPC status codes so callers can use status.Code()
// rather than string matching.
var globNoMatch *GlobNoMatchError
if errors.As(err, &globNoMatch) {
return nil, status.Error(codes.NotFound, err.Error())
}
return res, err
}
@@ -889,7 +857,7 @@ func (s *Service) runManifestGenAsync(ctx context.Context, repoRoot, commitSHA,
// Symlink check must happen after acquiring lock.
if !s.initConstants.AllowOutOfBoundsSymlinks {
err := s.checkOutOfBoundsSymlinks(gitClient.Root(), commitSHA, q.NoCache, ".git")
err := apppathutil.CheckOutOfBoundsSymlinks(gitClient.Root())
if err != nil {
oobError := &apppathutil.OutOfBoundsSymlinkError{}
if errors.As(err, &oobError) {
@@ -1408,55 +1376,19 @@ func getResolvedValueFiles(
gitRepoPaths utilio.TempPaths,
ignoreMissingValueFiles bool,
) ([]pathutil.ResolvedFilePath, error) {
// Pre-collect resolved paths for all explicit (non-glob) entries. This allows glob
// expansion to skip files that also appear explicitly, so the explicit entry controls
// the final position. For example, with ["*.yaml", "c.yaml"], c.yaml is excluded from
// the glob expansion and placed at the end where it was explicitly listed.
explicitPaths := make(map[pathutil.ResolvedFilePath]struct{})
for _, rawValueFile := range rawValueFiles {
referencedSource := getReferencedSource(rawValueFile, refSources)
var resolved pathutil.ResolvedFilePath
var err error
if referencedSource != nil {
resolved, err = getResolvedRefValueFile(rawValueFile, env, allowedValueFilesSchemas, referencedSource.Repo.Repo, gitRepoPaths)
} else {
resolved, _, err = pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, env.Envsubst(rawValueFile), allowedValueFilesSchemas)
}
if err != nil {
continue // resolution errors will be surfaced in the main loop below
}
if !isGlobPath(string(resolved)) {
explicitPaths[resolved] = struct{}{}
}
}
var resolvedValueFiles []pathutil.ResolvedFilePath
seen := make(map[pathutil.ResolvedFilePath]struct{})
appendUnique := func(p pathutil.ResolvedFilePath) {
if _, ok := seen[p]; !ok {
seen[p] = struct{}{}
resolvedValueFiles = append(resolvedValueFiles, p)
}
}
for _, rawValueFile := range rawValueFiles {
isRemote := false
var resolvedPath pathutil.ResolvedFilePath
var err error
referencedSource := getReferencedSource(rawValueFile, refSources)
// effectiveRoot is the repository root used for the symlink boundary check
// on glob matches. For ref-source paths this is the external repo's checkout
// directory; for local paths it is the main repo root.
effectiveRoot := repoRoot
if referencedSource != nil {
// If the $-prefixed path appears to reference another source, do env substitution _after_ resolving that source.
resolvedPath, err = getResolvedRefValueFile(rawValueFile, env, allowedValueFilesSchemas, referencedSource.Repo.Repo, gitRepoPaths)
if err != nil {
return nil, fmt.Errorf("error resolving value file path: %w", err)
}
if refRepoPath := gitRepoPaths.GetPathIfExists(git.NormalizeGitURL(referencedSource.Repo.Repo)); refRepoPath != "" {
effectiveRoot = refRepoPath
}
} else {
// This will resolve val to an absolute path (or a URL)
resolvedPath, isRemote, err = pathutil.ResolveValueFilePathOrUrl(appPath, repoRoot, env.Envsubst(rawValueFile), allowedValueFilesSchemas)
@@ -1465,38 +1397,6 @@ func getResolvedValueFiles(
}
}
// If the resolved path contains a glob pattern, expand it to all matching files.
// doublestar.FilepathGlob is used (consistent with AppSet generators) because it supports
// ** for recursive matching in addition to all standard glob patterns (*,?,[).
// Matches are returned in lexical order, which determines helm's merge precedence
// (later files override earlier ones). Glob patterns are only expanded for local files;
// remote value file URLs (e.g. https://...) are passed through as-is.
// If the glob matches no files and ignoreMissingValueFiles is true, skip it silently.
// Otherwise, return an error — consistent with how missing non-glob value files are handled.
if !isRemote && isGlobPath(string(resolvedPath)) {
matches, err := doublestar.FilepathGlob(string(resolvedPath))
if err != nil {
return nil, fmt.Errorf("error expanding glob pattern %q: %w", rawValueFile, err)
}
if len(matches) == 0 {
if ignoreMissingValueFiles {
log.Debugf(" %s values file glob matched no files", rawValueFile)
continue
}
return nil, &GlobNoMatchError{Pattern: rawValueFile}
}
if err := verifyGlobMatchesWithinRoot(matches, effectiveRoot); err != nil {
return nil, fmt.Errorf("glob pattern %q: %w", rawValueFile, err)
}
for _, match := range matches {
// Skip files that are also listed explicitly - they will be placed
// at their explicit position rather than the glob's position.
if _, isExplicit := explicitPaths[pathutil.ResolvedFilePath(match)]; !isExplicit {
appendUnique(pathutil.ResolvedFilePath(match))
}
}
continue
}
if !isRemote {
_, err = os.Stat(string(resolvedPath))
if os.IsNotExist(err) {
@@ -1507,9 +1407,8 @@ func getResolvedValueFiles(
}
}
appendUnique(resolvedPath)
resolvedValueFiles = append(resolvedValueFiles, resolvedPath)
}
log.Infof("resolved value files: %v", resolvedValueFiles)
return resolvedValueFiles, nil
}
@@ -1579,61 +1478,6 @@ func getRepoCredential(repoCredentials []*v1alpha1.RepoCreds, repoURL string) *v
return nil
}
// GlobNoMatchError is returned when a glob pattern in valueFiles matches no
// files. It signals a runtime condition — the matching files may appear in the
// repository later — rather than an invalid application spec.
type GlobNoMatchError struct {
	// Pattern is the raw glob exactly as written in the application spec.
	Pattern string
}

// Error implements the error interface, quoting the offending pattern.
func (e *GlobNoMatchError) Error() string {
	msg := fmt.Sprintf("values file glob %q matched no files", e.Pattern)
	return msg
}
// isGlobPath reports whether path contains any of the glob metacharacters
// understood by doublestar: *, ? or [. The recursive ** form is covered by *.
func isGlobPath(path string) bool {
	return strings.IndexAny(path, "*?[") >= 0
}
// verifyGlobMatchesWithinRoot canonicalizes every glob match via
// filepath.EvalSymlinks and checks that the resolved target lies inside
// effectiveRoot. It protects against symlinks inside the repository that
// point to targets outside it.
//
// doublestar.FilepathGlob uses os.Lstat, so it reports the symlink itself
// (which lives inside the repo), not the symlink target. Helm, however, would
// follow the link and read the external file — this function catches that
// case before the paths reach Helm.
//
// effectiveRoot is canonicalized too (Abs + EvalSymlinks) so the prefix
// comparison is correct even when the working directory sits under a symlink
// chain (e.g. /var -> /private/var on macOS).
func verifyGlobMatchesWithinRoot(matches []string, effectiveRoot string) error {
	sep := string(os.PathSeparator)
	absRoot, err := filepath.Abs(effectiveRoot)
	if err != nil {
		return fmt.Errorf("error resolving repo root: %w", err)
	}
	canonicalRoot, err := filepath.EvalSymlinks(absRoot)
	if err != nil {
		return fmt.Errorf("error resolving symlinks in repo root: %w", err)
	}
	// Prefix used for containment checks, always ending in a path separator
	// so that "/repo-other" is not mistaken for a child of "/repo".
	rootPrefix := canonicalRoot
	if !strings.HasSuffix(rootPrefix, sep) {
		rootPrefix += sep
	}
	for _, m := range matches {
		resolved, err := filepath.EvalSymlinks(m)
		if err != nil {
			return fmt.Errorf("error resolving symlink for glob match %q: %w", m, err)
		}
		// A match is acceptable when it resolves to the root itself
		// (resolved+sep equals the prefix) or to any path beneath it.
		if strings.HasPrefix(resolved, rootPrefix) || resolved+sep == rootPrefix {
			continue
		}
		return fmt.Errorf("glob match %q resolved to outside repository root", m)
	}
	return nil
}
type (
GenerateManifestOpt func(*generateManifestOpt)
generateManifestOpt struct {

View File

@@ -3895,567 +3895,6 @@ func Test_getResolvedValueFiles(t *testing.T) {
}
}
// Test_getResolvedValueFiles_glob exercises glob expansion in Helm valueFiles:
// plain globs, recursive ** patterns, globs into $ref external repos,
// env-var substitution inside patterns, no-match error/skip behavior, and the
// deduplication rules between glob matches and explicitly listed files.
func Test_getResolvedValueFiles_glob(t *testing.T) {
	t.Parallel()
	tempDir := t.TempDir()
	paths := utilio.NewRandomizedTempPaths(tempDir)
	paths.Add(git.NormalizeGitURL("https://github.com/org/repo1"), path.Join(tempDir, "repo1"))
	// main-repo files
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "main-repo", "prod", "nested"), 0o755))
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "main-repo", "staging"), 0o755))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "a.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "b.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "staging", "e.yaml"), []byte{}, 0o644))
	// main-repo envs: used to verify depth-order with ** (z.yaml sorts after nested/ alphabetically
	// but is still returned before nested/c.yaml because doublestar matches depth-0 files first).
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "main-repo", "envs", "nested"), 0o755))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "envs", "a.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "envs", "z.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"), []byte{}, 0o644))
	// repo1 files
	require.NoError(t, os.MkdirAll(path.Join(tempDir, "repo1", "prod", "nested"), 0o755))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "repo1", "prod", "x.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "repo1", "prod", "y.yaml"), []byte{}, 0o644))
	require.NoError(t, os.WriteFile(path.Join(tempDir, "repo1", "prod", "nested", "z.yaml"), []byte{}, 0o644))
	tests := []struct {
		name                    string
		rawPath                 string
		env                     *v1alpha1.Env
		refSources              map[string]*v1alpha1.RefTarget
		expectedPaths           []string
		ignoreMissingValueFiles bool
		expectedErr             bool
	}{
		{
			name:          "local glob matches multiple files",
			rawPath:       "prod/*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{
				// the order is a.yaml before b.yaml
				// since doublestar.FilepathGlob returns lexical order
				path.Join(tempDir, "main-repo", "prod", "a.yaml"),
				path.Join(tempDir, "main-repo", "prod", "b.yaml"),
			},
		},
		{
			name:          "local glob matches no files returns error",
			rawPath:       "dev/*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: nil,
			expectedErr:   true,
		},
		{
			name:                    "local glob matches no files with ignoreMissingValueFiles set to true",
			rawPath:                 "dev/*.yaml",
			env:                     &v1alpha1.Env{},
			refSources:              map[string]*v1alpha1.RefTarget{},
			ignoreMissingValueFiles: true,
			expectedPaths:           nil,
		},
		{
			name:    "referenced glob matches multiple files in external repo",
			rawPath: "$ref/prod/*.yaml",
			env:     &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": {
					Repo: v1alpha1.Repository{
						Repo: "https://github.com/org/repo1",
					},
				},
			},
			expectedPaths: []string{
				path.Join(tempDir, "repo1", "prod", "x.yaml"),
				path.Join(tempDir, "repo1", "prod", "y.yaml"),
			},
		},
		{
			// $ENV must be substituted before the glob is expanded in the ref repo.
			name:    "ref glob with env var in path",
			rawPath: "$ref/$ENV/*.yaml",
			env: &v1alpha1.Env{
				&v1alpha1.EnvEntry{
					Name:  "ENV",
					Value: "prod",
				},
			},
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": {
					Repo: v1alpha1.Repository{
						Repo: "https://github.com/org/repo1",
					},
				},
			},
			expectedPaths: []string{
				path.Join(tempDir, "repo1", "prod", "x.yaml"),
				path.Join(tempDir, "repo1", "prod", "y.yaml"),
			},
		},
		{
			name:          "local glob single match",
			rawPath:       "prod/a*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{path.Join(tempDir, "main-repo", "prod", "a.yaml")},
		},
		{
			name: "recursive glob matches files at all depths under a subdirectory",
			// ** matches zero or more path segments, so prod/**/*.yaml covers both
			// prod/*.yaml (zero intermediate segments) and prod/nested/*.yaml (one segment), etc.
			rawPath:    "prod/**/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			// lexical order: prod/a.yaml, prod/b.yaml, prod/nested/c.yaml, prod/nested/d.yaml
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "prod", "a.yaml"),
				path.Join(tempDir, "main-repo", "prod", "b.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"),
			},
		},
		{
			name:       "recursive glob from repo root matches yaml files across all directories",
			rawPath:    "**/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			// doublestar traverses directories in lexical order, processing each directory's
			// own files before its subdirectories. So the order is:
			// envs/ flat files → envs/nested/ files → prod/ flat files → prod/nested/ files → staging/ files
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "envs", "a.yaml"),
				path.Join(tempDir, "main-repo", "envs", "z.yaml"),
				path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "a.yaml"),
				path.Join(tempDir, "main-repo", "prod", "b.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"),
				path.Join(tempDir, "main-repo", "staging", "e.yaml"),
			},
		},
		{
			name:       "recursive glob anchored to a named subdirectory matches at any depth",
			rawPath:    "**/nested/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"),
				path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"),
			},
		},
		{
			name:                    "recursive glob with no matches and ignoreMissingValueFiles skips silently",
			rawPath:                 "**/nonexistent/*.yaml",
			env:                     &v1alpha1.Env{},
			refSources:              map[string]*v1alpha1.RefTarget{},
			ignoreMissingValueFiles: true,
			expectedPaths:           nil,
		},
		{
			name:          "recursive glob with no matches returns error",
			rawPath:       "**/nonexistent/*.yaml",
			env:           &v1alpha1.Env{},
			refSources:    map[string]*v1alpha1.RefTarget{},
			expectedPaths: nil,
			expectedErr:   true,
		},
		{
			// z.yaml sorts after "nested/" alphabetically by full path, but doublestar processes
			// each directory's own files before descending into subdirectories. So for envs/**/*.yaml:
			// envs/ flat files (a, z) come before envs/nested/ files (c), giving:
			// a.yaml, z.yaml, nested/c.yaml — not a.yaml, nested/c.yaml, z.yaml.
			name:       "** depth-order: flat files before nested even when flat file sorts after nested/ alphabetically",
			rawPath:    "envs/**/*.yaml",
			env:        &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{},
			expectedPaths: []string{
				path.Join(tempDir, "main-repo", "envs", "a.yaml"),
				path.Join(tempDir, "main-repo", "envs", "z.yaml"),
				path.Join(tempDir, "main-repo", "envs", "nested", "c.yaml"),
			},
		},
		{
			name:    "recursive glob in external ref repo",
			rawPath: "$ref/prod/**/*.yaml",
			env:     &v1alpha1.Env{},
			refSources: map[string]*v1alpha1.RefTarget{
				"$ref": {
					Repo: v1alpha1.Repository{
						Repo: "https://github.com/org/repo1",
					},
				},
			},
			expectedPaths: []string{
				// doublestar matches zero path segments before recursing into subdirectories,
				// so flat files (x, y) come before nested ones (nested/z).
				path.Join(tempDir, "repo1", "prod", "x.yaml"),
				path.Join(tempDir, "repo1", "prod", "y.yaml"),
				path.Join(tempDir, "repo1", "prod", "nested", "z.yaml"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			repoPath := path.Join(tempDir, "main-repo")
			resolvedPaths, err := getResolvedValueFiles(repoPath, repoPath, tt.env, []string{}, []string{tt.rawPath}, tt.refSources, paths, tt.ignoreMissingValueFiles)
			if tt.expectedErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			// Compare both length and per-index position: glob ordering determines
			// helm merge precedence, so the order itself is under test.
			require.Len(t, resolvedPaths, len(tt.expectedPaths))
			for i, p := range tt.expectedPaths {
				assert.Equal(t, p, string(resolvedPaths[i]))
			}
		})
	}
	// Deduplication: first occurrence of a resolved path wins. Subsequent references to the
	// same file, whether explicit or via glob are silently dropped. This preserves the
	// merge-precedence position set by the first mention of each file.
	t.Run("glob then explicit: explicit entry placed at end, giving it highest Helm precedence", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"envs/*.yaml", // glob - z.yaml is explicit so skipped; only a.yaml added
				"envs/z.yaml", // explicit - placed last, highest precedence
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 2)
		assert.Equal(t, path.Join(tempDir, "main-repo", "envs", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "envs", "z.yaml"), string(resolvedPaths[1]))
	})
	t.Run("explicit path before glob: explicit position is kept, glob re-match is dropped", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/a.yaml", // explicit locks in position 0
				"prod/*.yaml", // glob - a.yaml already seen, only b.yaml is new
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 2)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[1]))
	})
	t.Run("glob before explicit path: explicit position wins, glob skips the explicitly listed file", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/*.yaml", // glob - a.yaml is explicit so skipped; only b.yaml added (pos 0)
				"prod/a.yaml", // explicit - placed here at pos 1 (highest precedence)
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 2)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[1]))
	})
	t.Run("two overlapping globs: second glob only adds files not matched by first", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/*.yaml",    // adds a.yaml, b.yaml
				"prod/**/*.yaml", // a.yaml, b.yaml already seen; adds nested/c.yaml, nested/d.yaml
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 4)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[1]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"), string(resolvedPaths[2]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"), string(resolvedPaths[3]))
	})
	t.Run("explicit paths take priority: globs skip explicitly listed files, which are placed at their explicit positions", func(t *testing.T) {
		t.Parallel()
		repoPath := path.Join(tempDir, "main-repo")
		resolvedPaths, err := getResolvedValueFiles(
			repoPath, repoPath,
			&v1alpha1.Env{}, []string{},
			[]string{
				"prod/a.yaml",        // explicit - pos 0
				"prod/*.yaml",        // a.yaml and b.yaml are both explicit, skipped entirely
				"prod/b.yaml",        // explicit - pos 1
				"prod/**/*.yaml",     // a.yaml, b.yaml, nested/c.yaml all explicit and skipped; nested/d.yaml added - pos 2
				"prod/nested/c.yaml", // explicit - pos 3
			},
			map[string]*v1alpha1.RefTarget{}, paths, false,
		)
		require.NoError(t, err)
		require.Len(t, resolvedPaths, 4)
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "a.yaml"), string(resolvedPaths[0]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "b.yaml"), string(resolvedPaths[1]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "d.yaml"), string(resolvedPaths[2]))
		assert.Equal(t, path.Join(tempDir, "main-repo", "prod", "nested", "c.yaml"), string(resolvedPaths[3]))
	})
}
// Test_verifyGlobMatchesWithinRoot unit-tests the symlink containment check:
// regular files and symlink chains that stay inside the repo pass, while any
// link (direct or multi-hop) that resolves outside the repo is rejected.
func Test_verifyGlobMatchesWithinRoot(t *testing.T) {
	t.Parallel()
	tempDir := t.TempDir()
	repoDir := filepath.Join(tempDir, "repo")
	outsideDir := filepath.Join(tempDir, "outside")
	require.NoError(t, os.MkdirAll(filepath.Join(repoDir, "values", "sub"), 0o755))
	require.NoError(t, os.MkdirAll(outsideDir, 0o755))
	// Files used as symlink targets
	inRepoFile := filepath.Join(repoDir, "values", "real.yaml")
	outsideFile := filepath.Join(outsideDir, "secret.yaml")
	require.NoError(t, os.WriteFile(inRepoFile, []byte{}, 0o644))
	require.NoError(t, os.WriteFile(outsideFile, []byte("password: hunter2"), 0o644))
	// Symlink inside repo → file inside repo (safe)
	inRepoLink := filepath.Join(repoDir, "values", "inrepo-link.yaml")
	require.NoError(t, os.Symlink(inRepoFile, inRepoLink))
	// Symlink inside repo → file outside repo (escape)
	escapeLink := filepath.Join(repoDir, "values", "escape-link.yaml")
	require.NoError(t, os.Symlink(outsideFile, escapeLink))
	// Two-hop symlink: inside repo → another symlink (still inside) → file inside repo
	hop1 := filepath.Join(repoDir, "values", "hop1.yaml")
	require.NoError(t, os.Symlink(inRepoLink, hop1)) // hop1 → inRepoLink → real.yaml
	// Two-hop symlink: inside repo → another symlink (inside repo) → file outside repo
	hop2 := filepath.Join(repoDir, "values", "hop2.yaml")
	require.NoError(t, os.Symlink(escapeLink, hop2)) // hop2 → escape-link → secret.yaml
	tests := []struct {
		name        string
		matches     []string
		expectErr   bool
		errContains string
	}{
		{
			name:    "regular file inside root passes",
			matches: []string{inRepoFile},
		},
		{
			name:    "symlink inside root pointing to file inside root passes",
			matches: []string{inRepoLink},
		},
		{
			name:    "two-hop chain that stays within root passes",
			matches: []string{hop1},
		},
		{
			name:        "symlink pointing directly outside root is rejected",
			matches:     []string{escapeLink},
			expectErr:   true,
			errContains: "resolved to outside repository root",
		},
		{
			name:        "two-hop chain that escapes root is rejected",
			matches:     []string{hop2},
			expectErr:   true,
			errContains: "resolved to outside repository root",
		},
		{
			name:    "multiple matches all inside root pass",
			matches: []string{inRepoFile, inRepoLink, hop1},
		},
		{
			// The first escaping match fails the whole list — no partial acceptance.
			name:        "one bad match in a list fails the whole call",
			matches:     []string{inRepoFile, escapeLink},
			expectErr:   true,
			errContains: "resolved to outside repository root",
		},
		{
			name:    "empty matches list is a no-op",
			matches: []string{},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			err := verifyGlobMatchesWithinRoot(tt.matches, repoDir)
			if tt.expectErr {
				require.Error(t, err)
				assert.Contains(t, err.Error(), tt.errContains)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// Test_getResolvedValueFiles_glob_symlink_escape is an integration-level check
// that verifyGlobMatchesWithinRoot is wired into glob expansion correctly: a
// symlink inside the repo pointing outside must cause getResolvedValueFiles to
// return an error rather than silently including the external file.
func Test_getResolvedValueFiles_glob_symlink_escape(t *testing.T) {
	t.Parallel()
	tempDir := t.TempDir()
	paths := utilio.NewRandomizedTempPaths(tempDir)
	repoDir := filepath.Join(tempDir, "repo")
	outsideDir := filepath.Join(tempDir, "outside")
	require.NoError(t, os.MkdirAll(filepath.Join(repoDir, "values"), 0o755))
	require.NoError(t, os.MkdirAll(outsideDir, 0o755))
	require.NoError(t, os.WriteFile(filepath.Join(repoDir, "values", "base.yaml"), []byte{}, 0o644))
	// Sensitive content outside the repo that must never be readable via the glob.
	require.NoError(t, os.WriteFile(filepath.Join(outsideDir, "secret.yaml"), []byte("password: hunter2"), 0o644))
	require.NoError(t, os.Symlink(filepath.Join(outsideDir, "secret.yaml"), filepath.Join(repoDir, "values", "escape.yaml")))
	_, err := getResolvedValueFiles(repoDir, repoDir, &v1alpha1.Env{}, []string{}, []string{"values/*.yaml"}, map[string]*v1alpha1.RefTarget{}, paths, false)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "resolved to outside repository root")
}
// Test_isGlobPath checks that isGlobPath reports true exactly when a path
// contains one of the glob metacharacters *, ? or [.
func Test_isGlobPath(t *testing.T) {
	cases := []struct {
		in   string
		want bool
	}{
		{"prod/*.yaml", true},
		{"prod/?.yaml", true},
		{"prod[ab].yaml", true},
		{"prod/**/*.yaml", true},
		{"prod/values.yaml", false},
		{"values.yaml", false},
		{"", false},
		{"/absolute/path/to/*.yaml", true},
		{"/absolute/path/to/values.yaml", false},
		{"*", true},
		{"?", true},
		{"[", true},
	}
	for _, tc := range cases {
		t.Run(tc.in, func(t *testing.T) {
			assert.Equal(t, tc.want, isGlobPath(tc.in))
		})
	}
}
// Test_getReferencedSource verifies that a "$ref/..." value-file path is
// resolved to its RefTarget from the refSources map, and that unknown refs,
// empty strings, and plain paths all yield nil.
func Test_getReferencedSource(t *testing.T) {
	t.Parallel()
	target := &v1alpha1.RefTarget{
		Repo: v1alpha1.Repository{
			Repo: "https://github.com/org/repo1",
		},
	}
	withRef := map[string]*v1alpha1.RefTarget{"$ref": target}
	cases := []struct {
		name         string
		rawValueFile string
		refSources   map[string]*v1alpha1.RefTarget
		want         *v1alpha1.RefTarget
	}{
		{name: "ref with file path found in map", rawValueFile: "$ref/values.yaml", refSources: withRef, want: target},
		{name: "ref with file path not in map", rawValueFile: "$ref/values.yaml", refSources: map[string]*v1alpha1.RefTarget{}, want: nil},
		{name: "bare ref without file path found in map", rawValueFile: "$ref", refSources: withRef, want: target},
		{name: "empty string returns nil", rawValueFile: "", refSources: withRef, want: nil},
		{name: "no $ prefix returns nil", rawValueFile: "values.yaml", refSources: withRef, want: nil},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			assert.Equal(t, tc.want, getReferencedSource(tc.rawValueFile, tc.refSources))
		})
	}
}
func TestErrorGetGitDirectories(t *testing.T) {
// test not using the cache
root := "./testdata/git-files-dirs"

View File

@@ -1,4 +1,4 @@
FROM docker.io/library/redis:8.6.1@sha256:a019c005570189bb038155c1dfb1a269b59b83f9ceee22fd5f42de205ac19c06 AS redis
FROM docker.io/library/redis:8.6.1@sha256:1c054d54ecd1597bba52f4304bca5afbc5565ebe614c5b3d7dc5b7f8a0cd768d AS redis
# There are libraries we will want to copy from here in the final stage of the
# build, but the COPY directive does not have a way to determine system
@@ -14,7 +14,7 @@ FROM docker.io/library/registry:3.0@sha256:6c5666b861f3505b116bb9aa9b25175e71210
FROM docker.io/bitnamilegacy/kubectl:1.32@sha256:9524faf8e3cefb47fa28244a5d15f95ec21a73d963273798e593e61f80712333 AS kubectl
FROM docker.io/library/ubuntu:26.04@sha256:5798086f1a3a79f93e33f22d50cb326ebe769272cb84f646259d4995c019877b
FROM docker.io/library/ubuntu:26.04@sha256:fed6ddb82c61194e1814e93b59cfcb6759e5aa33c4e41bb3782313c2386ed6df
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -156,79 +156,6 @@ func TestHelmIgnoreMissingValueFiles(t *testing.T) {
Expect(ErrorRegex("Error: open .*does-not-exist-values.yaml: no such file or directory", ""))
}
// TestHelmGlobValueFiles verifies that a glob pattern in valueFiles expands to all matching
// files and that they are applied in lexical order (last file wins in helm merging).
// envs/*.yaml expands to envs/a.yaml then envs/b.yaml - b.yaml is last, so foo = "b-value".
func TestHelmGlobValueFiles(t *testing.T) {
	fixture.SkipOnEnv(t, "HELM")
	ctx := Given(t)
	ctx.Path("helm-glob-values").
		When().
		CreateApp().
		AppSet("--values", "envs/*.yaml").
		Sync().
		Then().
		Expect(OperationPhaseIs(OperationSucceeded)).
		Expect(HealthIs(health.HealthStatusHealthy)).
		Expect(SyncStatusIs(SyncStatusCodeSynced)).
		And(func(_ *Application) {
			// Read the deployed ConfigMap value to confirm which values file won the merge.
			val := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(),
				"get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string)
			assert.Equal(t, "b-value", val)
		})
}
// TestHelmRecursiveGlobValueFiles verifies that the ** double-star pattern recursively
// matches files at any depth. envs/**/*.yaml expands (zero-segments first) to:
// envs/a.yaml, envs/b.yaml, envs/nested/c.yaml - c.yaml is last, so foo = "c-value".
func TestHelmRecursiveGlobValueFiles(t *testing.T) {
	fixture.SkipOnEnv(t, "HELM")
	ctx := Given(t)
	ctx.Path("helm-glob-values").
		When().
		CreateApp().
		AppSet("--values", "envs/**/*.yaml").
		Sync().
		Then().
		Expect(OperationPhaseIs(OperationSucceeded)).
		Expect(HealthIs(health.HealthStatusHealthy)).
		Expect(SyncStatusIs(SyncStatusCodeSynced)).
		And(func(_ *Application) {
			// The deepest (lexically last) matching file must win the helm merge.
			val := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(),
				"get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string)
			assert.Equal(t, "c-value", val)
		})
}
// TestHelmGlobValueFilesNoMatch verifies that a glob pattern with no matching files
// surfaces as a comparison error on the application (a runtime condition, not an
// invalid-spec error, since matching files may be committed later).
func TestHelmGlobValueFilesNoMatch(t *testing.T) {
	fixture.SkipOnEnv(t, "HELM")
	Given(t).
		Path("helm-glob-values").
		When().
		CreateApp().
		AppSet("--values", "nonexistent/*.yaml").
		Then().
		Expect(Condition(ApplicationConditionComparisonError, `values file glob "nonexistent/*.yaml" matched no files`))
}
// TestHelmGlobValueFilesIgnoreMissing verifies that a non-matching glob pattern is
// silently skipped when ignoreMissingValueFiles is set, and the app syncs successfully.
func TestHelmGlobValueFilesIgnoreMissing(t *testing.T) {
	fixture.SkipOnEnv(t, "HELM")
	Given(t).
		Path("helm-glob-values").
		When().
		CreateApp().
		AppSet("--values", "nonexistent/*.yaml", "--ignore-missing-value-files").
		Sync().
		Then().
		Expect(OperationPhaseIs(OperationSucceeded)).
		Expect(HealthIs(health.HealthStatusHealthy)).
		Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestHelmValuesMultipleUnset(t *testing.T) {
Given(t).
Path("helm").

View File

@@ -1,3 +0,0 @@
apiVersion: v2
version: 1.0.0
name: helm-glob-values

View File

@@ -1 +0,0 @@
foo: a-value

View File

@@ -1 +0,0 @@
foo: b-value

View File

@@ -1 +0,0 @@
foo: c-value

View File

@@ -1,6 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: my-map
data:
foo: {{.Values.foo}}

View File

@@ -1 +0,0 @@
foo: default

View File

@@ -5,5 +5,4 @@ import "embed"
// Embedded contains embedded UI resources
//
//go:embed dist/app
//go:embed all:dist/app/assets/images/resources
var Embedded embed.FS

View File

@@ -8,7 +8,6 @@ import {services} from '../../../shared/services';
import {
ApplicationSyncWindowStatusIcon,
ComparisonStatusIcon,
formatApplicationSetProgressiveSyncStep,
getAppDefaultSource,
getAppDefaultSyncRevisionExtra,
getAppOperationState,
@@ -135,7 +134,7 @@ const ProgressiveSyncStatus = ({application}: {application: models.Application})
<div className='application-status-panel__item-value' style={{color: getProgressiveSyncStatusColor(appResource.status)}}>
{getProgressiveSyncStatusIcon({status: appResource.status})}&nbsp;{appResource.status}
</div>
{appResource?.step !== undefined && <div className='application-status-panel__item-value'>{formatApplicationSetProgressiveSyncStep(appResource.step)}</div>}
{appResource?.step && <div className='application-status-panel__item-value'>Wave: {appResource.step}</div>}
{lastTransitionTime && (
<div className='application-status-panel__item-name' style={{marginBottom: '0.5em'}}>
Last Transition: <br />

View File

@@ -21,7 +21,6 @@ export const resourceIconGroups = {
'kyverno.io': true,
'opentelemetry.io': true,
'projectcontour.io': true,
'promoter.argoproj.io': true,
'work.karmada.io': true,
'zookeeper.pravega.io': true,
};

View File

@@ -16,8 +16,7 @@ jest.mock('./resource-customizations', () => ({
resourceIconGroups: {
'*.crossplane.io': true,
'*.fluxcd.io': true,
'cert-manager.io': true,
'promoter.argoproj.io': true
'cert-manager.io': true
}
}));
@@ -72,14 +71,6 @@ describe('ResourceIcon', () => {
expect(imgs.length).toBeGreaterThan(0);
expect(imgs[0].props.src).toBe('assets/images/resources/_.fluxcd.io/icon.svg');
});
it('should show group-based icon for promoter.argoproj.io', () => {
const testRenderer = renderer.create(<ResourceIcon group='promoter.argoproj.io' kind='PromotionStrategy' />);
const testInstance = testRenderer.root;
const imgs = testInstance.findAllByType('img');
expect(imgs.length).toBeGreaterThan(0);
expect(imgs[0].props.src).toBe('assets/images/resources/promoter.argoproj.io/icon.svg');
});
});
describe('fallback to kind-based icons (with non-matching group) - THIS IS THE BUG FIX', () => {

View File

@@ -1858,14 +1858,6 @@ export function getAppUrl(app: appModels.AbstractApplication): string {
return `${basePath}/${app.metadata.namespace}/${app.metadata.name}`;
}
/**
 * RollingSync step for display; backend uses -1 when no step matches the app's labels.
 * Undefined steps render as an empty step label.
 */
export function formatApplicationSetProgressiveSyncStep(step: string | undefined): string {
    return step === '-1' ? 'Step: unmatched label' : `Step: ${step ?? ''}`;
}
export const getProgressiveSyncStatusIcon = ({status, isButton}: {status: string; isButton?: boolean}) => {
const getIconProps = () => {
switch (status) {

View File

@@ -1,27 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Derived from GitOps Promoter plain icon (primary layer, no white outline):
https://github.com/argoproj-labs/gitops-promoter/blob/087fd273bbdad9c9669b93f520d6e4d1054d628f/docs/assets/logo/icon/primary.svg
Licensed under Apache License 2.0.
Single fill #8fa4b1 to match other Argo CD resource icons.
-->
<svg id="Layer_2" data-name="Layer 2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 557 556.93">
<defs>
<style>
.cls-1 {
fill: #8fa4b1;
}
</style>
</defs>
<g id="Graphics">
<g>
<ellipse class="cls-1" cx="131.66" cy="425.27" rx="31.79" ry="97.5" transform="translate(-262.15 217.66) rotate(-45)"/>
<ellipse class="cls-1" cx="78.3" cy="478.69" rx="23.36" ry="71.65" transform="translate(-315.55 195.58) rotate(-45)"/>
<ellipse class="cls-1" cx="35.47" cy="521.46" rx="15.55" ry="47.68" transform="translate(-358.33 177.81) rotate(-45)"/>
<ellipse class="cls-1" cx="410.41" cy="146.59" rx="23.4" ry="94.92" transform="translate(16.55 333.14) rotate(-45)"/>
<path class="cls-1" d="M142.99,424.54c-2.71,0-5.43-1.04-7.5-3.11-4.14-4.14-4.13-10.85.01-14.99l36.23-36.17c6.49,6.49,11.18,11.18,14.99,14.99l-36.24,36.18c-2.07,2.07-4.78,3.1-7.49,3.1Z"/>
<path class="cls-1" d="M528.29,28.8h0s-.03-.03-.05-.05c-.02-.02-.03-.03-.04-.05h0c-48.94-48.78-172.32-45.51-298.42,90.14-5.89,6.34-11.91,9.55-17.92,9.56-4.51,0-8.89-.07-13.17-.14-27.96-.47-54.36-.9-85.68,20.63-15.31,10.53-26.97,21.26-36.69,33.75l-45.01,68.39c-.77,1.48-1.01,2.99-.91,4.48l.1,1.08c1.49,8.26,13.83,15.91,19,16.67l65.4,8.3c9.42,1.36,11.81,2.27,16.61,9.14,2.43,8.19,2.71,9.87,2.76,18.76-.76,16.95,14.48,36.33,24.37,47.73l21.55,21.55-8.48-8.48,7.69-7.69c.06-.06.09-.13.15-.19l100.84-100.73c4.14-4.14,10.85-4.13,14.99,0,4.14,4.14,4.13,10.85,0,14.99l-87,86.91.02.02-21.69,21.69,13.09,13.09c11.4,9.88,30.78,25.13,47.73,24.37,8.89.05,10.57.33,18.76,2.76,6.87,4.8,7.77,7.2,9.14,16.61l8.3,65.41c.76,5.17,8.4,17.51,16.67,19,0,.01,1.08.11,1.08.1,1.5.09,3-.14,4.48-.91l68.39-45.01c12.5-9.72,23.22-21.38,33.75-36.69,21.54-31.32,21.1-57.72,20.63-85.68-.07-4.28-.15-8.66-.14-13.17,0-6.01,3.22-12.04,9.56-17.92,135.65-126.1,138.91-249.48,90.13-298.42ZM379.05,177.95c-34.12-34.44-62.15-77.59-61.65-107.87C386.6,18.94,468.63,3.78,511.18,43.34c.42.39.79.8,1.19,1.2l.05.05.05.05c.4.4.81.77,1.2,1.19,39.56,42.56,24.39,124.58-26.75,193.79-30.28.5-73.43-27.53-107.87-61.65Z"/>
<path class="cls-1" d="M382.59,69.55c-3.96,0-7.75-2.23-9.57-6.03-2.52-5.28-.28-11.61,5-14.13,30.34-14.48,63.95-19.59,94.66-14.38,5.77.98,9.66,6.45,8.68,12.22-.98,5.77-6.45,9.67-12.22,8.68-26.43-4.48-55.55,0-81.98,12.61-1.47.7-3.03,1.04-4.56,1.04Z"/>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 2.7 KiB

View File

@@ -45,28 +45,20 @@ func (e *OutOfBoundsSymlinkError) Error() string {
// CheckOutOfBoundsSymlinks determines if basePath contains any symlinks that
// are absolute or point to a path outside of the basePath. If found, an
// OutOfBoundsSymlinkError is returned.
func CheckOutOfBoundsSymlinks(basePath string, skipPaths ...string) error {
func CheckOutOfBoundsSymlinks(basePath string) error {
absBasePath, err := filepath.Abs(basePath)
if err != nil {
return fmt.Errorf("failed to get absolute path: %w", err)
}
skipPathsSet := map[string]bool{}
for _, p := range skipPaths {
skipPathsSet[filepath.Join(absBasePath, p)] = true
}
return filepath.Walk(absBasePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
// Ignore "no such file or directory" errors that can happen with
			// Ignore "no such file or directory" errors that can happen with
// temporary files such as .git/*.lock
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("failed to walk for symlinks in %s: %w", absBasePath, err)
}
if skipPathsSet[path] {
return filepath.SkipDir
}
if files.IsSymlink(info) {
// We don't use filepath.EvalSymlinks because it fails without returning a path
// if the target doesn't exist.

View File

@@ -83,11 +83,6 @@ func TestBadSymlinks3(t *testing.T) {
assert.Equal(t, "badlink", oobError.File)
}
func TestBadSymlinksExcluded(t *testing.T) {
err := CheckOutOfBoundsSymlinks("./testdata/badlink", "badlink")
assert.NoError(t, err)
}
// No absolute symlinks allowed
func TestAbsSymlink(t *testing.T) {
const testDir = "./testdata/abslink"

View File

@@ -910,12 +910,6 @@ func verifyGenerateManifests(
// and not whether it actually contains any manifests.
_, err = repoClient.GenerateManifest(ctx, &req)
if err != nil {
// A glob pattern matching no files is a runtime condition, not a spec error —
// the files may be added later. Skip adding an InvalidSpecError here and let
// the app controller surface it as a ComparisonError during reconciliation.
if status.Code(err) == codes.NotFound && strings.Contains(err.Error(), "matched no files") {
continue
}
errMessage := fmt.Sprintf("Unable to generate manifests in %s: %s", source.Path, err)
conditions = append(conditions, argoappv1.ApplicationCondition{
Type: argoappv1.ApplicationConditionInvalidSpecError,

View File

@@ -360,8 +360,7 @@ func TestVerifyCommitSignature(t *testing.T) {
err = client.Init()
require.NoError(t, err)
// Use shallow fetch to avoid timeout fetching the entire repo
err = client.Fetch("", 1)
err = client.Fetch("", 0)
require.NoError(t, err)
commitSHA, err := client.LsRemote("HEAD")
@@ -370,18 +369,10 @@ func TestVerifyCommitSignature(t *testing.T) {
_, err = client.Checkout(commitSHA, true, true)
require.NoError(t, err)
// Fetch the specific commits needed for signature verification
signedCommit := "28027897aad1262662096745f2ce2d4c74d02b7f"
unsignedCommit := "85d660f0b967960becce3d49bd51c678ba2a5d24"
err = client.Fetch(signedCommit, 1)
require.NoError(t, err)
err = client.Fetch(unsignedCommit, 1)
require.NoError(t, err)
// 28027897aad1262662096745f2ce2d4c74d02b7f is a commit that is signed in the repo
// It doesn't matter whether we know the key or not at this stage
{
out, err := client.VerifyCommitSignature(signedCommit)
out, err := client.VerifyCommitSignature("28027897aad1262662096745f2ce2d4c74d02b7f")
require.NoError(t, err)
assert.NotEmpty(t, out)
assert.Contains(t, out, "gpg: Signature made")
@@ -389,7 +380,7 @@ func TestVerifyCommitSignature(t *testing.T) {
// 85d660f0b967960becce3d49bd51c678ba2a5d24 is a commit that is not signed
{
out, err := client.VerifyCommitSignature(unsignedCommit)
out, err := client.VerifyCommitSignature("85d660f0b967960becce3d49bd51c678ba2a5d24")
require.NoError(t, err)
assert.Empty(t, out)
}

View File

@@ -1,108 +1,26 @@
package glob
import (
"sync"
"github.com/gobwas/glob"
"github.com/golang/groupcache/lru"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/singleflight"
)
const (
// DefaultGlobCacheSize is the default maximum number of compiled glob patterns to cache.
// This limit prevents memory exhaustion from untrusted RBAC patterns.
// 10,000 patterns should be sufficient for most deployments while limiting
// memory usage to roughly ~10MB (assuming ~1KB per compiled pattern).
DefaultGlobCacheSize = 10000
)
type compileFn func(pattern string, separators ...rune) (glob.Glob, error)
var (
// globCache stores compiled glob patterns using an LRU cache with bounded size.
// This prevents memory exhaustion from potentially untrusted RBAC patterns
// while still providing significant performance benefits.
globCache *lru.Cache
globCacheLock sync.Mutex
compileGroup singleflight.Group
compileGlob compileFn = glob.Compile
)
func init() {
globCache = lru.New(DefaultGlobCacheSize)
}
// SetCacheSize reinitializes the glob cache with the given maximum number of entries.
// This should be called early during process startup, before concurrent access begins.
func SetCacheSize(maxEntries int) {
globCacheLock.Lock()
defer globCacheLock.Unlock()
globCache = lru.New(maxEntries)
}
// globCacheKey uniquely identifies a compiled glob pattern.
// The same pattern compiled with different separators produces different globs,
// so both fields are needed.
type globCacheKey struct {
Pattern string
Separators string
}
func cacheKey(pattern string, separators ...rune) globCacheKey {
return globCacheKey{Pattern: pattern, Separators: string(separators)}
}
// getOrCompile returns a cached compiled glob pattern, compiling and caching it if necessary.
// Cache hits are a brief lock + map lookup. On cache miss, singleflight ensures each
// unique pattern is compiled exactly once even under concurrent access, while unrelated
// patterns compile in parallel.
// lru.Cache.Get() promotes entries (mutating), so a Mutex is used rather than RWMutex.
func getOrCompile(pattern string, compiler compileFn, separators ...rune) (glob.Glob, error) {
key := cacheKey(pattern, separators...)
globCacheLock.Lock()
if cached, ok := globCache.Get(key); ok {
globCacheLock.Unlock()
return cached.(glob.Glob), nil
}
globCacheLock.Unlock()
sfKey := key.Pattern + "\x00" + key.Separators
v, err, _ := compileGroup.Do(sfKey, func() (any, error) {
compiled, err := compiler(pattern, separators...)
if err != nil {
return nil, err
}
globCacheLock.Lock()
globCache.Add(key, compiled)
globCacheLock.Unlock()
return compiled, nil
})
if err != nil {
return nil, err
}
return v.(glob.Glob), nil
}
// Match tries to match a text with a given glob pattern.
// Compiled glob patterns are cached for performance.
func Match(pattern, text string, separators ...rune) bool {
compiled, err := getOrCompile(pattern, compileGlob, separators...)
compiledGlob, err := glob.Compile(pattern, separators...)
if err != nil {
log.Warnf("failed to compile pattern %s due to error %v", pattern, err)
return false
}
return compiled.Match(text)
return compiledGlob.Match(text)
}
// MatchWithError tries to match a text with a given glob pattern.
// Returns error if the glob pattern fails to compile.
// Compiled glob patterns are cached for performance.
// returns error if the glob pattern fails to compile.
func MatchWithError(pattern, text string, separators ...rune) (bool, error) {
compiled, err := getOrCompile(pattern, compileGlob, separators...)
compiledGlob, err := glob.Compile(pattern, separators...)
if err != nil {
return false, err
}
return compiled.Match(text), nil
return compiledGlob.Match(text), nil
}

View File

@@ -1,57 +1,11 @@
package glob
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"testing"
extglob "github.com/gobwas/glob"
"github.com/stretchr/testify/require"
)
// Test helpers - these access internal variables for testing purposes
// resetGlobCacheForTest clears the cached glob patterns for testing.
func resetGlobCacheForTest() {
globCacheLock.Lock()
defer globCacheLock.Unlock()
globCache.Clear()
}
// isPatternCached returns true if the pattern (with optional separators) is cached.
func isPatternCached(pattern string, separators ...rune) bool {
globCacheLock.Lock()
defer globCacheLock.Unlock()
_, ok := globCache.Get(cacheKey(pattern, separators...))
return ok
}
// globCacheLen returns the number of cached patterns.
func globCacheLen() int {
globCacheLock.Lock()
defer globCacheLock.Unlock()
return globCache.Len()
}
func matchWithCompiler(pattern, text string, compiler compileFn, separators ...rune) bool {
compiled, err := getOrCompile(pattern, compiler, separators...)
if err != nil {
return false
}
return compiled.Match(text)
}
func countingCompiler() (compileFn, *int32) {
var compileCount int32
compiler := func(pattern string, separators ...rune) (extglob.Glob, error) {
atomic.AddInt32(&compileCount, 1)
return extglob.Compile(pattern, separators...)
}
return compiler, &compileCount
}
func Test_Match(t *testing.T) {
tests := []struct {
name string
@@ -132,209 +86,3 @@ func Test_MatchWithError(t *testing.T) {
})
}
}
func Test_GlobCaching(t *testing.T) {
// Clear cache before test
resetGlobCacheForTest()
compiler, compileCount := countingCompiler()
pattern := "test*pattern"
text := "testABCpattern"
// First call should compile and cache
result1 := matchWithCompiler(pattern, text, compiler)
require.True(t, result1)
// Verify pattern is cached
require.True(t, isPatternCached(pattern), "pattern should be cached after first Match call")
// Second call should use cached value
result2 := matchWithCompiler(pattern, text, compiler)
require.True(t, result2)
// Results should be consistent
require.Equal(t, result1, result2)
require.Equal(t, int32(1), atomic.LoadInt32(compileCount), "glob should compile once for the cached pattern")
}
func Test_GlobCachingConcurrent(t *testing.T) {
// Clear cache before test
resetGlobCacheForTest()
compiler, compileCount := countingCompiler()
pattern := "concurrent*test"
text := "concurrentABCtest"
var wg sync.WaitGroup
numGoroutines := 100
errChan := make(chan error, numGoroutines)
for range numGoroutines {
wg.Go(func() {
result := matchWithCompiler(pattern, text, compiler)
if !result {
errChan <- errors.New("expected match to return true")
}
})
}
wg.Wait()
close(errChan)
// Check for any errors from goroutines
for err := range errChan {
t.Error(err)
}
// Verify pattern is cached
require.True(t, isPatternCached(pattern))
require.Equal(t, 1, globCacheLen(), "should only have one cached entry for the pattern")
require.Equal(t, int32(1), atomic.LoadInt32(compileCount), "glob should compile once for the cached pattern")
}
func Test_GlobCacheLRUEviction(t *testing.T) {
// Clear cache before test
resetGlobCacheForTest()
// Fill cache beyond DefaultGlobCacheSize
for i := range DefaultGlobCacheSize + 100 {
pattern := fmt.Sprintf("pattern-%d-*", i)
Match(pattern, "pattern-0-test")
}
// Cache size should be limited to DefaultGlobCacheSize
require.Equal(t, DefaultGlobCacheSize, globCacheLen(), "cache size should be limited to DefaultGlobCacheSize")
// The oldest patterns should be evicted
oldest := fmt.Sprintf("pattern-%d-*", 0)
require.False(t, isPatternCached(oldest), "oldest pattern should be evicted")
// The most recently used patterns should still be cached
require.True(t, isPatternCached(fmt.Sprintf("pattern-%d-*", DefaultGlobCacheSize+99)), "most recent pattern should be cached")
}
func Test_GlobCacheKeyIncludesSeparators(t *testing.T) {
resetGlobCacheForTest()
compiler, compileCount := countingCompiler()
pattern := "a*b"
textWithSlash := "a/b"
// Without separators, '*' matches '/' so "a/b" matches "a*b"
require.True(t, matchWithCompiler(pattern, textWithSlash, compiler))
require.Equal(t, int32(1), atomic.LoadInt32(compileCount))
// With separator '/', '*' does NOT match '/' so "a/b" should NOT match "a*b"
require.False(t, matchWithCompiler(pattern, textWithSlash, compiler, '/'))
require.Equal(t, int32(2), atomic.LoadInt32(compileCount), "same pattern with different separators must compile separately")
// Both entries should be independently cached
require.True(t, isPatternCached(pattern))
require.True(t, isPatternCached(pattern, '/'))
require.Equal(t, 2, globCacheLen())
// Subsequent calls should use cache (no additional compiles)
matchWithCompiler(pattern, textWithSlash, compiler)
matchWithCompiler(pattern, textWithSlash, compiler, '/')
require.Equal(t, int32(2), atomic.LoadInt32(compileCount), "cached patterns should not recompile")
}
func Test_InvalidGlobNotCached(t *testing.T) {
// Clear cache before test
resetGlobCacheForTest()
invalidPattern := "e[[a*"
text := "test"
// Match should return false for invalid pattern
result := Match(invalidPattern, text)
require.False(t, result)
// Invalid patterns should NOT be cached
require.False(t, isPatternCached(invalidPattern), "invalid pattern should not be cached")
// Also test with MatchWithError
_, err := MatchWithError(invalidPattern, text)
require.Error(t, err)
// Still should not be cached after MatchWithError
require.False(t, isPatternCached(invalidPattern), "invalid pattern should not be cached after MatchWithError")
}
func Test_SetCacheSize(t *testing.T) {
resetGlobCacheForTest()
customSize := 5
SetCacheSize(customSize)
defer SetCacheSize(DefaultGlobCacheSize)
for i := range customSize + 3 {
Match(fmt.Sprintf("setsize-%d-*", i), "setsize-0-test")
}
require.Equal(t, customSize, globCacheLen(), "cache size should respect the custom size set via SetCacheSize")
require.False(t, isPatternCached("setsize-0-*"), "oldest pattern should be evicted with custom cache size")
require.True(t, isPatternCached(fmt.Sprintf("setsize-%d-*", customSize+2)), "most recent pattern should be cached")
}
// BenchmarkMatch_WithCache benchmarks Match with caching (cache hit)
func BenchmarkMatch_WithCache(b *testing.B) {
pattern := "proj:*/app-*"
text := "proj:myproject/app-frontend"
// Warm up the cache
Match(pattern, text)
b.ResetTimer()
for i := 0; i < b.N; i++ {
Match(pattern, text)
}
}
// BenchmarkMatch_WithoutCache simulates the OLD behavior (compile every time)
// by calling glob.Compile + Match directly, bypassing the cache entirely.
func BenchmarkMatch_WithoutCache(b *testing.B) {
pattern := "proj:*/app-*"
text := "proj:myproject/app-frontend"
b.ResetTimer()
for i := 0; i < b.N; i++ {
compiled, err := extglob.Compile(pattern)
if err != nil {
b.Fatal(err)
}
compiled.Match(text)
}
}
// BenchmarkGlobCompile measures raw glob.Compile cost
func BenchmarkGlobCompile(b *testing.B) {
pattern := "proj:*/app-*"
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = extglob.Compile(pattern)
}
}
// BenchmarkMatch_RBACSimulation simulates real RBAC evaluation scenario
// 50 policies × 1 app = what happens per application in List
func BenchmarkMatch_RBACSimulation(b *testing.B) {
patterns := make([]string, 50)
for i := range 50 {
patterns[i] = fmt.Sprintf("proj:team-%d/*", i)
}
text := "proj:team-25/my-app"
// With caching: patterns are compiled once
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, pattern := range patterns {
Match(pattern, text)
}
}
}

View File

@@ -734,13 +734,10 @@ func TestClusterInformer_SecretDeletion(t *testing.T) {
err = clientset.CoreV1().Secrets("argocd").Delete(t.Context(), "cluster1", metav1.DeleteOptions{})
require.NoError(t, err)
require.Eventually(t, func() bool {
_, err := informer.GetClusterByURL("https://cluster1.example.com")
return err != nil
}, 5*time.Second, 10*time.Millisecond, "expected cluster1 to be removed from cache after secret deletion")
time.Sleep(100 * time.Millisecond)
_, err = informer.GetClusterByURL("https://cluster1.example.com")
require.Error(t, err)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not found")
cluster2, err := informer.GetClusterByURL("https://cluster2.example.com")

View File

@@ -238,18 +238,14 @@ func (a *ArgoCDWebhookHandler) affectedRevisionInfo(payloadIf any) (webURLs []st
break
}
log.Debugf("created bitbucket client with base URL '%s'", apiBaseURL)
owner, repoSlug, ok := strings.Cut(payload.Repository.FullName, "/")
if !ok || owner == "" || repoSlug == "" {
log.Warnf("error parsing bitbucket repository full name %q", payload.Repository.FullName)
break
}
owner := strings.ReplaceAll(payload.Repository.FullName, "/"+payload.Repository.Name, "")
spec := change.shaBefore + ".." + change.shaAfter
diffStatChangedFiles, err := fetchDiffStatFromBitbucket(ctx, bbClient, owner, repoSlug, spec)
diffStatChangedFiles, err := fetchDiffStatFromBitbucket(ctx, bbClient, owner, payload.Repository.Name, spec)
if err != nil {
log.Warnf("error fetching changed files using bitbucket diffstat api: %v", err)
}
changedFiles = append(changedFiles, diffStatChangedFiles...)
touchedHead, err = isHeadTouched(ctx, bbClient, owner, repoSlug, revision)
touchedHead, err = isHeadTouched(ctx, bbClient, owner, payload.Repository.Name, revision)
if err != nil {
log.Warnf("error fetching bitbucket repo details: %v", err)
// To be safe, we just return true and let the controller check for himself.

View File

@@ -1232,7 +1232,7 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
"repository":{
"type": "repository",
"full_name": "{{.owner}}/{{.repo}}",
"name": "{{.name}}",
"name": "{{.repo}}",
"scm": "git",
"links": {
"self": {"href": "https://api.bitbucket.org/2.0/repositories/{{.owner}}/{{.repo}}"},
@@ -1245,7 +1245,7 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
panic(err)
}
bitbucketPushPayload := func(branchName, owner, repo, name string) bitbucket.RepoPushPayload {
bitbucketPushPayload := func(branchName, owner, repo string) bitbucket.RepoPushPayload {
// The payload's "push.changes[0].new.name" member seems to only have the branch name (based on the example payload).
// https://support.atlassian.com/bitbucket-cloud/docs/event-payloads/#EventPayloads-Push
var pl bitbucket.RepoPushPayload
@@ -1254,7 +1254,6 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
"branch": branchName,
"owner": owner,
"repo": repo,
"name": name,
"oldHash": "abcdef",
"newHash": "ghijkl",
})
@@ -1277,7 +1276,7 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
"bitbucket branch name containing 'refs/heads/'",
false,
"release-0.0",
bitbucketPushPayload("release-0.0", "test-owner", "test-repo", "test-repo"),
bitbucketPushPayload("release-0.0", "test-owner", "test-repo"),
false,
[]string{"guestbook/guestbook-ui-deployment.yaml"},
changeInfo{
@@ -1289,55 +1288,7 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
"bitbucket branch name containing 'main'",
false,
"main",
bitbucketPushPayload("main", "test-owner", "test-repo", "test-repo"),
true,
[]string{"guestbook/guestbook-ui-deployment.yaml"},
changeInfo{
shaBefore: "abcdef",
shaAfter: "ghijkl",
},
},
{
"bitbucket display name is mixed case, differs from repo slug",
false,
"main",
bitbucketPushPayload("main", "test-owner", "test-repo", "Test Repo"),
true,
[]string{"guestbook/guestbook-ui-deployment.yaml"},
changeInfo{
shaBefore: "abcdef",
shaAfter: "ghijkl",
},
},
{
"bitbucket display name is all uppercase, differs from repo slug",
false,
"main",
bitbucketPushPayload("main", "test-owner", "test-repo", "TESTREPO"),
true,
[]string{"guestbook/guestbook-ui-deployment.yaml"},
changeInfo{
shaBefore: "abcdef",
shaAfter: "ghijkl",
},
},
{
"bitbucket display name is all lowercase, differs from repo slug",
false,
"main",
bitbucketPushPayload("main", "test-owner", "test-repo", "testrepo"),
true,
[]string{"guestbook/guestbook-ui-deployment.yaml"},
changeInfo{
shaBefore: "abcdef",
shaAfter: "ghijkl",
},
},
{
"bitbucket display name is all uppercase with spaces, differs from repo slug",
false,
"main",
bitbucketPushPayload("main", "test-owner", "test-repo", "TEST REPO"),
bitbucketPushPayload("main", "test-owner", "test-repo"),
true,
[]string{"guestbook/guestbook-ui-deployment.yaml"},
changeInfo{