Compare commits

..

11 Commits

Author SHA1 Message Date
Blake Pettersson
7fd134a040 chore(deps): bump go-jose from 4.1.3 to 4.1.4 (cherry-pick #27101 for… (#27208)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
2026-04-07 15:44:35 +02:00
argo-cd-cherry-pick-bot[bot]
b948ff074d fix: use unique names for initial commits (cherry-pick #27171 for 3.3) (#27197)
Signed-off-by: Sean Liao <sean@liao.dev>
Co-authored-by: Sean Liao <sean@liao.dev>
2026-04-06 16:47:43 -04:00
Pasha Kostohrys
2daefc0452 chore(deps): update notifications-engine dependency in release-3.3 to v0.5.1-0.20260316232552-d27ba0152c1c (#27093)
Co-authored-by: pasha <pasha.k@fyxt.com>
2026-04-06 05:01:26 +03:00
argo-cd-cherry-pick-bot[bot]
3b12b2e0d5 fix: Add X-Frame-Options and CSP headers to Swagger UI endpoints (cherry-pick #26521 for 3.3) (#27154)
Signed-off-by: rohansood10 <rohansood10@users.noreply.github.com>
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Rohan Sood <56945243+rohansood10@users.noreply.github.com>
Co-authored-by: rohansood10 <rohansood10@users.noreply.github.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-04-06 04:36:01 +03:00
argo-cd-cherry-pick-bot[bot]
2512512b0c fix: trigger app sync on app-set spec change (cherry-pick #26811 for 3.3) (#27130)
Signed-off-by: Patroklos Papapetrou <ppapapetrou76@gmail.com>
Co-authored-by: Papapetrou Patroklos <1743100+ppapapetrou76@users.noreply.github.com>
Co-authored-by: Jann Fischer <jann@mistrust.net>
2026-04-05 18:50:09 -04:00
argo-cd-cherry-pick-bot[bot]
a77c1501fe fix(docs): Fix manifest path in Source Hydrator docs (cherry-pick #27123 for 3.3) (#27167)
Signed-off-by: Oliver Gondža <ogondza@gmail.com>
Co-authored-by: Oliver Gondža <ogondza@gmail.com>
2026-04-05 10:22:45 -04:00
rumstead
671efff31d fix(controller): reduce secret deepcopies and deserialization (#27049) (cherry-pick release-3.3) (#27129)
Signed-off-by: rumstead <37445536+rumstead@users.noreply.github.com>
2026-04-02 15:08:40 -04:00
argo-cd-cherry-pick-bot[bot]
f0b924f0b0 fix(server): Ensure OIDC config is refreshed at server restart (cherry-pick #26913 for 3.3) (#27114)
Signed-off-by: OpenGuidou <guillaume.doussin@gmail.com>
Co-authored-by: OpenGuidou <73480729+OpenGuidou@users.noreply.github.com>
2026-04-01 17:58:20 -07:00
Jonathan Ogilvie
ea51bd8047 [release-3.3] fix: improve perf: switch parentUIDToChildren to map of sets, remove cache rebuild (#26863) (#27108)
Signed-off-by: Jonathan Ogilvie <jonathan.ogilvie@sumologic.com>
Signed-off-by: Jonathan Ogilvie <679297+jcogilvie@users.noreply.github.com>
2026-04-01 12:19:29 -04:00
argo-cd-cherry-pick-bot[bot]
e372a7f09c fix: pass repo.insecure flag to helm dependency build (cherry-pick #27078 for 3.3) (#27083)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
2026-03-30 22:38:26 -10:00
argo-cd-cherry-pick-bot[bot]
e451591448 fix: use force flag in copyutil symlink to prevent repo-server crashes (cherry-pick #26613 for 3.3) (#27057)
Signed-off-by: manvitha92 <62259625+manvitha92@users.noreply.github.com>
Co-authored-by: ManvithaP <62259625+ManvithaP-hub@users.noreply.github.com>
Co-authored-by: manvitha92 <62259625+manvitha92@users.noreply.github.com>
2026-03-29 15:23:56 +03:00
37 changed files with 1108 additions and 154 deletions

View File

@@ -73,6 +73,9 @@ const (
ReconcileRequeueOnValidationError = time.Minute * 3
ReverseDeletionOrder = "Reverse"
AllAtOnceDeletionOrder = "AllAtOnce"
revisionAndSpecChangedMsg = "Application has pending changes (revision and spec differ), setting status to Waiting"
revisionChangedMsg = "Application has pending changes, setting status to Waiting"
specChangedMsg = "Application has pending changes (spec differs), setting status to Waiting"
)
var defaultPreservedFinalizers = []string{
@@ -960,7 +963,7 @@ func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx conte
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application) (map[string]bool, error) {
appDependencyList, appStepMap := r.buildAppDependencyList(logCtx, appset, desiredApplications)
_, err := r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
_, err := r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, desiredApplications, appStepMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
}
@@ -1139,10 +1142,16 @@ func getAppStep(appName string, appStepMap map[string]int) int {
}
// check the status of each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
// Build a map of desired applications for quick lookup
desiredAppsMap := make(map[string]*argov1alpha1.Application)
for i := range desiredApplications {
desiredAppsMap[desiredApplications[i].Name] = &desiredApplications[i]
}
for _, app := range applications {
appHealthStatus := app.Status.Health.Status
appSyncStatus := app.Status.Sync.Status
@@ -1177,10 +1186,27 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
newAppStatus := currentAppStatus.DeepCopy()
newAppStatus.Step = strconv.Itoa(getAppStep(newAppStatus.Application, appStepMap))
if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
// A new version is available in the application and we need to re-sync the application
revisionsChanged := !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions())
// Check if the desired Application spec differs from the current Application spec
specChanged := false
if desiredApp, ok := desiredAppsMap[app.Name]; ok {
// Compare the desired spec with the current spec to detect non-Git changes
// This will catch changes to generator parameters like image tags, helm values, etc.
specChanged = !cmp.Equal(desiredApp.Spec, app.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{}))
}
if revisionsChanged || specChanged {
newAppStatus.TargetRevisions = app.Status.GetRevisions()
newAppStatus.Message = "Application has pending changes, setting status to Waiting"
switch {
case revisionsChanged && specChanged:
newAppStatus.Message = revisionAndSpecChangedMsg
case revisionsChanged:
newAppStatus.Message = revisionChangedMsg
default:
newAppStatus.Message = specChangedMsg
}
newAppStatus.Status = argov1alpha1.ProgressiveSyncWaiting
newAppStatus.LastTransitionTime = &now
}

View File

@@ -4637,6 +4637,12 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
}
}
newAppWithSpec := func(name string, health health.HealthStatusCode, sync v1alpha1.SyncStatusCode, revision string, opState *v1alpha1.OperationState, spec v1alpha1.ApplicationSpec) v1alpha1.Application {
app := newApp(name, health, sync, revision, opState)
app.Spec = spec
return app
}
newOperationState := func(phase common.OperationPhase) *v1alpha1.OperationState {
finishedAt := &metav1.Time{Time: time.Now().Add(-1 * time.Second)}
if !phase.Completed() {
@@ -4653,6 +4659,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
name string
appSet v1alpha1.ApplicationSet
apps []v1alpha1.Application
desiredApps []v1alpha1.Application
appStepMap map[string]int
expectedAppStatus []v1alpha1.ApplicationSetApplicationStatus
}{
@@ -4806,14 +4813,14 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "Application has pending changes, setting status to Waiting",
Message: revisionChangedMsg,
Status: v1alpha1.ProgressiveSyncWaiting,
Step: "1",
TargetRevisions: []string{"next"},
},
{
Application: "app2-multisource",
Message: "Application has pending changes, setting status to Waiting",
Message: revisionChangedMsg,
Status: v1alpha1.ProgressiveSyncWaiting,
Step: "1",
TargetRevisions: []string{"next"},
@@ -5253,6 +5260,191 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
{
name: "detects spec changes when image tag changes in generator (same Git revision)",
appSet: newDefaultAppSet(2, []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "",
Status: v1alpha1.ProgressiveSyncHealthy,
Step: "1",
TargetRevisions: []string{"abc123"},
},
}),
apps: []v1alpha1.Application{
newAppWithSpec("app1", health.HealthStatusHealthy, v1alpha1.SyncStatusCodeOutOfSync, "abc123", nil, // Changed to OutOfSync
v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://example.com/repo.git",
TargetRevision: "master",
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "image.tag", Value: "v1.0.0"},
},
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "default",
},
}),
},
desiredApps: []v1alpha1.Application{
newAppWithSpec("app1", health.HealthStatusHealthy, v1alpha1.SyncStatusCodeOutOfSync, "abc123", nil, // Changed to OutOfSync
v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://example.com/repo.git",
TargetRevision: "master",
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "image.tag", Value: "v2.0.0"}, // Different value
},
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "default",
},
}),
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: specChangedMsg,
Status: v1alpha1.ProgressiveSyncWaiting,
Step: "1",
TargetRevisions: []string{"abc123"},
},
},
},
{
name: "does not detect changes when spec is identical (same Git revision)",
appSet: newDefaultAppSet(2, []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "",
Status: v1alpha1.ProgressiveSyncHealthy,
Step: "1",
TargetRevisions: []string{"abc123"},
},
}),
apps: []v1alpha1.Application{
newAppWithSpec("app1", health.HealthStatusHealthy, v1alpha1.SyncStatusCodeSynced, "abc123", nil,
v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://example.com/repo.git",
TargetRevision: "master",
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "image.tag", Value: "v1.0.0"},
},
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "default",
},
}),
},
appStepMap: map[string]int{
"app1": 0,
},
// Desired apps have identical spec
desiredApps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Spec: v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://example.com/repo.git",
TargetRevision: "master",
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "image.tag", Value: "v1.0.0"}, // Same value
},
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "default",
},
},
},
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "",
Status: v1alpha1.ProgressiveSyncHealthy,
Step: "1",
TargetRevisions: []string{"abc123"},
},
},
},
{
name: "detects both spec and revision changes",
appSet: newDefaultAppSet(2, []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "",
Status: v1alpha1.ProgressiveSyncHealthy,
Step: "1",
TargetRevisions: []string{"abc123"}, // OLD revision in status
},
}),
apps: []v1alpha1.Application{
newAppWithSpec("app1", health.HealthStatusHealthy, v1alpha1.SyncStatusCodeOutOfSync, "def456", nil, // NEW revision, but OutOfSync
v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://example.com/repo.git",
TargetRevision: "master",
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "image.tag", Value: "v1.0.0"},
},
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "default",
},
}),
},
desiredApps: []v1alpha1.Application{
newAppWithSpec("app1", health.HealthStatusHealthy, v1alpha1.SyncStatusCodeOutOfSync, "def456", nil,
v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://example.com/repo.git",
TargetRevision: "master",
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{Name: "image.tag", Value: "v2.0.0"}, // Changed value
},
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "default",
},
}),
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: revisionAndSpecChangedMsg,
Status: v1alpha1.ProgressiveSyncWaiting,
Step: "1",
TargetRevisions: []string{"def456"},
},
},
},
} {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
@@ -5272,7 +5464,11 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
Metrics: metrics,
}
appStatuses, err := r.updateApplicationSetApplicationStatus(t.Context(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, cc.appStepMap)
desiredApps := cc.desiredApps
if desiredApps == nil {
desiredApps = cc.apps
}
appStatuses, err := r.updateApplicationSetApplicationStatus(t.Context(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, desiredApps, cc.appStepMap)
// opt out of testing the LastTransitionTime is accurate
for i := range appStatuses {

View File

@@ -132,11 +132,11 @@ func (c *clusterInfoUpdater) getUpdatedClusterInfo(ctx context.Context, apps []*
continue
}
}
destCluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, c.db)
destServer, err := argo.GetDestinationServer(ctx, a.Spec.Destination, c.db)
if err != nil {
continue
}
if destCluster.Server == cluster.Server {
if destServer == cluster.Server {
appCount++
}
}

View File

@@ -101,6 +101,121 @@ func TestClusterSecretUpdater(t *testing.T) {
}
}
func TestGetUpdatedClusterInfo_AppCount(t *testing.T) {
const fakeNamespace = "fake-ns"
const clusterServer = "https://prod.example.com"
const clusterName = "prod"
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string]string{},
}
argoCDSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string][]byte{"admin.password": nil, "server.secretkey": nil},
}
clusterSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "prod-cluster",
Namespace: fakeNamespace,
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte(clusterName),
"server": []byte(clusterServer),
"config": []byte("{}"),
},
}
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecret, clusterSecret)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
argoDB := db.NewDB(fakeNamespace, settingsManager, kubeclientset)
apps := []*v1alpha1.Application{
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Name: clusterName}}},
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Server: clusterServer}}},
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Server: "https://other.example.com"}}},
}
updater := &clusterInfoUpdater{db: argoDB, namespace: fakeNamespace}
cluster := v1alpha1.Cluster{Server: clusterServer}
info := updater.getUpdatedClusterInfo(t.Context(), apps, cluster, nil, metav1.Now())
assert.Equal(t, int64(2), info.ApplicationsCount)
}
func TestGetUpdatedClusterInfo_AmbiguousName(t *testing.T) {
const fakeNamespace = "fake-ns"
const clusterServer = "https://prod.example.com"
const clusterName = "prod"
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string]string{},
}
argoCDSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string][]byte{"admin.password": nil, "server.secretkey": nil},
}
makeClusterSecret := func(secretName, server string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: fakeNamespace,
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte(clusterName),
"server": []byte(server),
"config": []byte("{}"),
},
}
}
// Two secrets share the same cluster name
kubeclientset := fake.NewClientset(
emptyArgoCDConfigMap, argoCDSecret,
makeClusterSecret("prod-cluster-1", clusterServer),
makeClusterSecret("prod-cluster-2", "https://prod2.example.com"),
)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
argoDB := db.NewDB(fakeNamespace, settingsManager, kubeclientset)
apps := []*v1alpha1.Application{
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Name: clusterName}}},
}
updater := &clusterInfoUpdater{db: argoDB, namespace: fakeNamespace}
cluster := v1alpha1.Cluster{Server: clusterServer}
info := updater.getUpdatedClusterInfo(t.Context(), apps, cluster, nil, metav1.Now())
assert.Equal(t, int64(0), info.ApplicationsCount, "ambiguous name should not count app")
}
func TestUpdateClusterLabels(t *testing.T) {
shouldNotBeInvoked := func(_ context.Context, _ *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
shouldNotHappen := errors.New("if an error happens here, something's wrong")

View File

@@ -6,7 +6,7 @@ Tools like Helm and Kustomize allow users to express their Kubernetes manifests
(keeping it DRY - Don't Repeat Yourself). However, these tools can obscure the actual Kubernetes manifests that are
applied to the cluster.
The "rendered manifest pattern" is a feature of Argo CD that allows users to push the hydrated manifests to git before syncing them to the cluster. This
The *rendered manifest pattern* is a feature of Argo CD that allows users to push the hydrated manifests to git before syncing them to the cluster. This
allows users to see the actual Kubernetes manifests that are applied to the cluster.
## Enabling the Source Hydrator
@@ -14,7 +14,7 @@ allows users to see the actual Kubernetes manifests that are applied to the clus
The source hydrator is disabled by default.
To enable the source hydrator, you need to enable the "commit server" component and set the `hydrator.enabled` field in
argocd-cmd-params-cm ConfigMap to `"true"`.
`argocd-cmd-params-cm` ConfigMap to `"true"`.
```yaml
apiVersion: v1
@@ -40,7 +40,7 @@ With hydrator: https://raw.githubusercontent.com/argoproj/argo-cd/stable/mani
```
> [!IMPORTANT]
> The `*-with-hydrator-install.yaml` manifests will eventually be removed when the source hydrator is either enabled
> The `*-install-with-hydrator.yaml` manifests will eventually be removed when the source hydrator is either enabled
> by default or removed. The upgrade guide will note if the `install-with-hydrator.yaml` manifests are no longer
> available.

View File

@@ -220,7 +220,7 @@ func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCa
listRetryLimit: 1,
listRetryUseBackoff: false,
listRetryFunc: ListRetryFuncNever,
parentUIDToChildren: make(map[types.UID][]kube.ResourceKey),
parentUIDToChildren: make(map[types.UID]map[kube.ResourceKey]struct{}),
}
for i := range opts {
opts[i](cache)
@@ -280,10 +280,11 @@ type clusterCache struct {
respectRBAC int
// Parent-to-children index for O(1) hierarchy traversal
// Maps any resource's UID to its direct children's ResourceKeys
// Eliminates need for O(n) graph building during hierarchy traversal
parentUIDToChildren map[types.UID][]kube.ResourceKey
// Parent-to-children index for O(1) child lookup during hierarchy traversal
// Maps any resource's UID to a set of its direct children's ResourceKeys
// Using a set eliminates O(k) duplicate checking on insertions
// Used for cross-namespace hierarchy traversal; namespaced traversal still builds a graph
parentUIDToChildren map[types.UID]map[kube.ResourceKey]struct{}
}
type clusterCacheSync struct {
@@ -504,27 +505,35 @@ func (c *clusterCache) setNode(n *Resource) {
for k, v := range ns {
// update child resource owner references
if n.isInferredParentOf != nil && mightHaveInferredOwner(v) {
v.setOwnerRef(n.toOwnerRef(), n.isInferredParentOf(k))
shouldBeParent := n.isInferredParentOf(k)
v.setOwnerRef(n.toOwnerRef(), shouldBeParent)
// Update index inline for inferred ref changes.
// Note: The removal case (shouldBeParent=false) is currently unreachable for
// StatefulSet→PVC relationships because Kubernetes makes volumeClaimTemplates
// immutable. We include it for defensive correctness and future-proofing.
if n.Ref.UID != "" {
if shouldBeParent {
c.addToParentUIDToChildren(n.Ref.UID, k)
} else {
c.removeFromParentUIDToChildren(n.Ref.UID, k)
}
}
}
if mightHaveInferredOwner(n) && v.isInferredParentOf != nil {
n.setOwnerRef(v.toOwnerRef(), v.isInferredParentOf(n.ResourceKey()))
}
}
}
}
// rebuildParentToChildrenIndex rebuilds the parent-to-children index after a full sync
// This is called after initial sync to ensure all parent-child relationships are tracked
func (c *clusterCache) rebuildParentToChildrenIndex() {
// Clear existing index
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
// Rebuild parent-to-children index from all resources with owner refs
for _, resource := range c.resources {
key := resource.ResourceKey()
for _, ownerRef := range resource.OwnerRefs {
if ownerRef.UID != "" {
c.addToParentUIDToChildren(ownerRef.UID, key)
childKey := n.ResourceKey()
shouldBeParent := v.isInferredParentOf(childKey)
n.setOwnerRef(v.toOwnerRef(), shouldBeParent)
// Update index inline for inferred ref changes.
// Note: The removal case (shouldBeParent=false) is currently unreachable for
// StatefulSet→PVC relationships because Kubernetes makes volumeClaimTemplates
// immutable. We include it for defensive correctness and future-proofing.
if v.Ref.UID != "" {
if shouldBeParent {
c.addToParentUIDToChildren(v.Ref.UID, childKey)
} else {
c.removeFromParentUIDToChildren(v.Ref.UID, childKey)
}
}
}
}
}
@@ -533,31 +542,29 @@ func (c *clusterCache) rebuildParentToChildrenIndex() {
// addToParentUIDToChildren adds a child to the parent-to-children index
func (c *clusterCache) addToParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
// Check if child is already in the list to avoid duplicates
children := c.parentUIDToChildren[parentUID]
for _, existing := range children {
if existing == childKey {
return // Already exists, no need to add
}
// Get or create the set for this parent
childrenSet := c.parentUIDToChildren[parentUID]
if childrenSet == nil {
childrenSet = make(map[kube.ResourceKey]struct{})
c.parentUIDToChildren[parentUID] = childrenSet
}
c.parentUIDToChildren[parentUID] = append(children, childKey)
// Add child to set (O(1) operation, automatically handles duplicates)
childrenSet[childKey] = struct{}{}
}
// removeFromParentUIDToChildren removes a child from the parent-to-children index
func (c *clusterCache) removeFromParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
children := c.parentUIDToChildren[parentUID]
for i, existing := range children {
if existing == childKey {
// Remove by swapping with last element and truncating
children[i] = children[len(children)-1]
c.parentUIDToChildren[parentUID] = children[:len(children)-1]
childrenSet := c.parentUIDToChildren[parentUID]
if childrenSet == nil {
return
}
// Clean up empty entries
if len(c.parentUIDToChildren[parentUID]) == 0 {
delete(c.parentUIDToChildren, parentUID)
}
return
}
// Remove child from set (O(1) operation)
delete(childrenSet, childKey)
// Clean up empty sets to avoid memory leaks
if len(childrenSet) == 0 {
delete(c.parentUIDToChildren, parentUID)
}
}
@@ -1014,7 +1021,7 @@ func (c *clusterCache) sync() error {
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
c.resources = make(map[kube.ResourceKey]*Resource)
c.namespacedResources = make(map[schema.GroupKind]bool)
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
c.parentUIDToChildren = make(map[types.UID]map[kube.ResourceKey]struct{})
config := c.config
version, err := c.kubectl.GetServerVersion(config)
if err != nil {
@@ -1113,9 +1120,6 @@ func (c *clusterCache) sync() error {
return fmt.Errorf("failed to sync cluster %s: %w", c.config.Host, err)
}
// Rebuild orphaned children index after all resources are loaded
c.rebuildParentToChildrenIndex()
c.log.Info("Cluster successfully synced")
return nil
}
@@ -1256,8 +1260,8 @@ func (c *clusterCache) processCrossNamespaceChildren(
}
// Use parent-to-children index for O(1) lookup of direct children
childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
for _, childKey := range childKeys {
childrenSet := c.parentUIDToChildren[clusterResource.Ref.UID]
for childKey := range childrenSet {
child := c.resources[childKey]
if child == nil {
continue
@@ -1310,8 +1314,8 @@ func (c *clusterCache) iterateChildrenUsingIndex(
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
// Look up direct children of this parent using the index
childKeys := c.parentUIDToChildren[parent.Ref.UID]
for _, childKey := range childKeys {
childrenSet := c.parentUIDToChildren[parent.Ref.UID]
for childKey := range childrenSet {
if actionCallState[childKey] != notCalled {
continue // action() already called or in progress
}
@@ -1631,6 +1635,10 @@ func (c *clusterCache) onNodeRemoved(key kube.ResourceKey) {
for k, v := range ns {
if mightHaveInferredOwner(v) && existing.isInferredParentOf(k) {
v.setOwnerRef(existing.toOwnerRef(), false)
// Update index inline when removing inferred ref
if existing.Ref.UID != "" {
c.removeFromParentUIDToChildren(existing.Ref.UID, k)
}
}
}
}

View File

@@ -416,6 +416,128 @@ func TestStatefulSetOwnershipInferred(t *testing.T) {
}
}
// TestStatefulSetPVC_ParentToChildrenIndex verifies that inferred StatefulSet → PVC
// relationships are correctly captured in the parentUIDToChildren index during initial sync.
//
// The index is updated inline when inferred owner refs are added in setNode()
// (see the inferred parent handling section in clusterCache.setNode).
func TestStatefulSetPVC_ParentToChildrenIndex(t *testing.T) {
stsUID := types.UID("sts-uid-123")
// StatefulSet with volumeClaimTemplate named "data"
sts := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
ObjectMeta: metav1.ObjectMeta{UID: stsUID, Name: "web", Namespace: "default"},
Spec: appsv1.StatefulSetSpec{
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
ObjectMeta: metav1.ObjectMeta{Name: "data"},
}},
},
}
// PVCs that match the StatefulSet's volumeClaimTemplate pattern: <template>-<sts>-<ordinal>
// These have NO explicit owner references - the relationship is INFERRED
pvc0 := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
ObjectMeta: metav1.ObjectMeta{UID: "pvc-0-uid", Name: "data-web-0", Namespace: "default"},
}
pvc1 := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
ObjectMeta: metav1.ObjectMeta{UID: "pvc-1-uid", Name: "data-web-1", Namespace: "default"},
}
// Create cluster with all resources
// Must add PersistentVolumeClaim to API resources since it's not in the default set
cluster := newCluster(t, sts, pvc0, pvc1).WithAPIResources([]kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: kube.PersistentVolumeClaimKind},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
Meta: metav1.APIResource{Namespaced: true},
}})
err := cluster.EnsureSynced()
require.NoError(t, err)
// Verify the parentUIDToChildren index contains the inferred relationships
cluster.lock.RLock()
defer cluster.lock.RUnlock()
pvc0Key := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "data-web-0"}
pvc1Key := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "data-web-1"}
children, ok := cluster.parentUIDToChildren[stsUID]
require.True(t, ok, "StatefulSet should have entry in parentUIDToChildren index")
require.Contains(t, children, pvc0Key, "PVC data-web-0 should be in StatefulSet's children (inferred relationship)")
require.Contains(t, children, pvc1Key, "PVC data-web-1 should be in StatefulSet's children (inferred relationship)")
// Also verify the OwnerRefs were set correctly on the PVCs
pvc0Resource := cluster.resources[pvc0Key]
require.NotNil(t, pvc0Resource)
require.Len(t, pvc0Resource.OwnerRefs, 1, "PVC0 should have inferred owner ref")
require.Equal(t, stsUID, pvc0Resource.OwnerRefs[0].UID, "PVC0 owner should be the StatefulSet")
pvc1Resource := cluster.resources[pvc1Key]
require.NotNil(t, pvc1Resource)
require.Len(t, pvc1Resource.OwnerRefs, 1, "PVC1 should have inferred owner ref")
require.Equal(t, stsUID, pvc1Resource.OwnerRefs[0].UID, "PVC1 owner should be the StatefulSet")
}
// TestStatefulSetPVC_WatchEvent_IndexUpdated verifies that a PVC delivered via
// a watch event (i.e. after the initial sync) gets both its inferred owner
// reference AND an entry in the parentUIDToChildren index.
//
// It exercises the inline index-update path in setNode(), which maintains the
// index immediately whenever inferred owner refs are attached.
func TestStatefulSetPVC_WatchEvent_IndexUpdated(t *testing.T) {
	ownerUID := types.UID("sts-uid-456")

	// StatefulSet carrying a volumeClaimTemplate; ownership of a matching PVC
	// is inferred from the template/name pattern.
	statefulSet := &appsv1.StatefulSet{
		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
		ObjectMeta: metav1.ObjectMeta{UID: ownerUID, Name: "db", Namespace: "default"},
		Spec: appsv1.StatefulSetSpec{
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
				ObjectMeta: metav1.ObjectMeta{Name: "storage"},
			}},
		},
	}

	// Seed the cluster with the StatefulSet only; the PVC shows up later,
	// simulating a watch event arriving after EnsureSynced has completed.
	c := newCluster(t, statefulSet).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: kube.PersistentVolumeClaimKind},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
		Meta:                 metav1.APIResource{Namespaced: true},
	}})
	require.NoError(t, c.EnsureSynced())

	// PVC whose name matches the StatefulSet's volumeClaimTemplate pattern.
	claim := &corev1.PersistentVolumeClaim{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
		ObjectMeta: metav1.ObjectMeta{UID: "pvc-watch-uid", Name: "storage-db-0", Namespace: "default"},
	}

	// Deliver the PVC the same way the watch handler would: under the write
	// lock, through setNode.
	c.lock.Lock()
	c.setNode(c.newResource(mustToUnstructured(claim)))
	c.lock.Unlock()

	c.lock.RLock()
	defer c.lock.RUnlock()

	claimKey := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "storage-db-0"}

	// The cached PVC must carry the inferred owner reference.
	cached := c.resources[claimKey]
	require.NotNil(t, cached, "PVC should exist in cache")
	require.Len(t, cached.OwnerRefs, 1, "PVC should have inferred owner ref from StatefulSet")
	require.Equal(t, ownerUID, cached.OwnerRefs[0].UID, "Owner should be the StatefulSet")

	// And the parent-to-children index must have been updated inline.
	kids, ok := c.parentUIDToChildren[ownerUID]
	require.True(t, ok, "Index should be updated when inferred refs are added via watch events")
	require.Contains(t, kids, claimKey, "PVC should be in StatefulSet's children (inferred relationship)")
}
func TestEnsureSyncedSingleNamespace(t *testing.T) {
obj1 := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -2298,3 +2420,226 @@ func TestIterateHierarchyV2_CircularOwnerChain_NoStackOverflow(t *testing.T) {
assert.Equal(t, 1, visitCount["resource-a"], "resource-a should be visited exactly once")
assert.Equal(t, 1, visitCount["resource-b"], "resource-b should be visited exactly once")
}
// BenchmarkSync_ParentToChildrenIndex measures the overhead that parent-to-children
// index maintenance adds to sync. It was written while investigating the
// performance regression reported in https://github.com/argoproj/argo-cd/issues/26863
//
// The index is maintained with O(1) set-based operations and updated inline in
// setNode() for both explicit and inferred owner refs, so no rebuild pass exists.
//
// Sync is benchmarked at several scales with varying fractions of resources
// carrying owner references, to quantify the index-building cost.
func BenchmarkSync_ParentToChildrenIndex(b *testing.B) {
	benchmarks := []struct {
		name        string
		total       int
		ownerRefPct int // percentage of child resources that carry owner references
	}{
		// Baseline: no owner refs, so index operations are no-ops.
		{name: "1000res_0pctOwnerRefs", total: 1000, ownerRefPct: 0},
		{name: "5000res_0pctOwnerRefs", total: 5000, ownerRefPct: 0},
		{name: "10000res_0pctOwnerRefs", total: 10000, ownerRefPct: 0},
		// Typical: ~80% owned (pods owned by RS, RS owned by Deployment).
		{name: "1000res_80pctOwnerRefs", total: 1000, ownerRefPct: 80},
		{name: "5000res_80pctOwnerRefs", total: 5000, ownerRefPct: 80},
		{name: "10000res_80pctOwnerRefs", total: 10000, ownerRefPct: 80},
		// Heavy: every child is owned.
		{name: "1000res_100pctOwnerRefs", total: 1000, ownerRefPct: 100},
		{name: "5000res_100pctOwnerRefs", total: 5000, ownerRefPct: 100},
		{name: "10000res_100pctOwnerRefs", total: 10000, ownerRefPct: 100},
		// Stress: larger scale.
		{name: "20000res_80pctOwnerRefs", total: 20000, ownerRefPct: 80},
	}
	for _, bm := range benchmarks {
		b.Run(bm.name, func(b *testing.B) {
			objs := make([]runtime.Object, 0, bm.total)

			// Parents (Deployments) make up 10% of the total and never carry
			// owner refs themselves.
			parentCount := bm.total / 10
			if parentCount < 1 {
				parentCount = 1
			}
			parentUIDs := make([]types.UID, parentCount)
			for i := range parentUIDs {
				uid := types.UID(fmt.Sprintf("deploy-uid-%d", i))
				parentUIDs[i] = uid
				objs = append(objs, &appsv1.Deployment{
					TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprintf("deploy-%d", i),
						Namespace: "default",
						UID:       uid,
					},
				})
			}

			// Children (Pods); the first ownedCount of them reference a parent.
			childCount := bm.total - parentCount
			ownedCount := (childCount * bm.ownerRefPct) / 100
			for i := 0; i < childCount; i++ {
				pod := &corev1.Pod{
					TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprintf("pod-%d", i),
						Namespace: "default",
						UID:       types.UID(fmt.Sprintf("pod-uid-%d", i)),
					},
				}
				if i < ownedCount {
					owner := i % parentCount
					pod.OwnerReferences = []metav1.OwnerReference{{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
						Name:       fmt.Sprintf("deploy-%d", owner),
						UID:        parentUIDs[owner],
					}}
				}
				objs = append(objs, pod)
			}

			cluster := newCluster(b, objs...)
			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				// sync() reinitializes resources, parentUIDToChildren, etc. at
				// the start, so no manual reset is needed between iterations.
				if err := cluster.sync(); err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkUpdateParentUIDToChildren measures the cost of the incremental index
// updates performed in setNode, which runs for EVERY resource during sync. The
// index stores children as a set, so add/remove are O(1) no matter how many
// children a parent already has — this benchmark confirms that across sizes.
func BenchmarkUpdateParentUIDToChildren(b *testing.B) {
	sizes := []struct {
		name     string
		children int
	}{
		{name: "10children", children: 10},
		{name: "50children", children: 50},
		{name: "100children", children: 100},
		{name: "500children", children: 500},
		{name: "1000children", children: 1000},
	}
	for _, sz := range sizes {
		b.Run(sz.name, func(b *testing.B) {
			cluster := newCluster(b)
			if err := cluster.EnsureSynced(); err != nil {
				b.Fatal(err)
			}

			parent := types.UID("parent-uid")

			// Pre-populate the parent's child set so the benchmarked operations
			// run against a set of the target size.
			existing := make(map[kube.ResourceKey]struct{})
			for i := 0; i < sz.children; i++ {
				existing[kube.ResourceKey{
					Group:     "",
					Kind:      "Pod",
					Namespace: "default",
					Name:      fmt.Sprintf("existing-child-%d", i),
				}] = struct{}{}
			}
			cluster.parentUIDToChildren[parent] = existing

			// Key that gets added and removed on every iteration.
			probe := kube.ResourceKey{
				Group:     "",
				Kind:      "Pod",
				Namespace: "default",
				Name:      "new-child",
			}

			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				// O(1) set insertion, then removal so the next iteration can
				// insert the same key again.
				cluster.addToParentUIDToChildren(parent, probe)
				cluster.removeFromParentUIDToChildren(parent, probe)
			}
		})
	}
}
// BenchmarkIncrementalIndexBuild measures building the parent-to-children index
// from scratch via repeated addToParentUIDToChildren calls — the incremental
// path taken during sync. The index uses O(1) set-based operations.
//
// Written to investigate issue #26863 and to verify the fix.
func BenchmarkIncrementalIndexBuild(b *testing.B) {
	shapes := []struct {
		name              string
		parents           int
		childrenPerParent int
	}{
		{name: "100parents_10children", parents: 100, childrenPerParent: 10},
		{name: "100parents_50children", parents: 100, childrenPerParent: 50},
		{name: "100parents_100children", parents: 100, childrenPerParent: 100},
		{name: "1000parents_10children", parents: 1000, childrenPerParent: 10},
		{name: "1000parents_100children", parents: 1000, childrenPerParent: 100},
	}
	for _, shape := range shapes {
		// Benchmark the incremental approach (what setNode does).
		b.Run(shape.name+"_incremental", func(b *testing.B) {
			cluster := newCluster(b)
			if err := cluster.EnsureSynced(); err != nil {
				b.Fatal(err)
			}

			// Pre-compute every (parent UID, child key) pair outside the timed
			// region so iterations only pay for the index operations.
			type childInfo struct {
				parentUID types.UID
				childKey  kube.ResourceKey
			}
			pairs := make([]childInfo, 0, shape.parents*shape.childrenPerParent)
			for p := 0; p < shape.parents; p++ {
				uid := types.UID(fmt.Sprintf("parent-%d", p))
				for c := 0; c < shape.childrenPerParent; c++ {
					pairs = append(pairs, childInfo{
						parentUID: uid,
						childKey: kube.ResourceKey{
							Kind:      "Pod",
							Namespace: "default",
							Name:      fmt.Sprintf("child-%d-%d", p, c),
						},
					})
				}
			}

			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				// Start from an empty index each iteration...
				cluster.parentUIDToChildren = make(map[types.UID]map[kube.ResourceKey]struct{})
				// ...then rebuild it with O(1) incremental inserts.
				for _, pair := range pairs {
					cluster.addToParentUIDToChildren(pair.parentUID, pair.childKey)
				}
			}
		})
	}
}

4
go.mod
View File

@@ -13,7 +13,7 @@ require (
github.com/TomOnTime/utfutil v1.0.0
github.com/alicebob/miniredis/v2 v2.35.0
github.com/argoproj/gitops-engine v0.7.1-0.20250908182407-97ad5b59a627
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630
github.com/argoproj/notifications-engine v0.5.1-0.20260316232552-d27ba0152c1c
github.com/argoproj/pkg v0.13.6
github.com/argoproj/pkg/v2 v2.0.1
github.com/aws/aws-sdk-go v1.55.7
@@ -36,7 +36,7 @@ require (
github.com/gfleury/go-bitbucket-v1 v0.0.0-20240917142304-df385efaac68
// DO NOT BUMP UNTIL go-git/go-git#1551 is fixed
github.com/go-git/go-git/v5 v5.14.0
github.com/go-jose/go-jose/v4 v4.1.3
github.com/go-jose/go-jose/v4 v4.1.4
github.com/go-logr/logr v1.4.3
github.com/go-openapi/loads v0.23.2
github.com/go-openapi/runtime v0.29.2

8
go.sum
View File

@@ -115,8 +115,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630 h1:naE5KNRTOALjF5nVIGUHrHU5xjlB8QJJiCu+aISIlSs=
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
github.com/argoproj/notifications-engine v0.5.1-0.20260316232552-d27ba0152c1c h1:TJxLxQybEa3vBGVJkmBb0UIA/N8TxYD32INQmijDsDY=
github.com/argoproj/notifications-engine v0.5.1-0.20260316232552-d27ba0152c1c/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8=
github.com/argoproj/pkg/v2 v2.0.1 h1:O/gCETzB/3+/hyFL/7d/VM/6pSOIRWIiBOTb2xqAHvc=
@@ -309,8 +309,8 @@ github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA=
github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=

View File

@@ -308,7 +308,7 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
command:
- sh
- '-c'
@@ -373,4 +373,4 @@ spec:
app.kubernetes.io/part-of: argocd
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
kubernetes.io/os: linux

View File

@@ -31881,8 +31881,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -31715,8 +31715,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -33516,8 +33516,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -33352,8 +33352,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -2774,8 +2774,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -2610,8 +2610,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -32546,8 +32546,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -32380,8 +32380,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -1804,8 +1804,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -1638,8 +1638,8 @@ spec:
name: plugins
initContainers:
- args:
- /bin/cp --update=none /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln
-s /var/run/argocd/argocd /var/run/argocd/argocd-cmp-server
- /bin/cp /usr/local/bin/argocd /var/run/argocd/argocd && /bin/ln -sf /var/run/argocd/argocd
/var/run/argocd/argocd-cmp-server
command:
- sh
- -c

View File

@@ -29,4 +29,7 @@ const (
// KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster
KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc"
// KubernetesInClusterName is the name of the local in-cluster cluster
KubernetesInClusterName = "in-cluster"
)

View File

@@ -1295,7 +1295,7 @@ func helmTemplate(appPath string, repoRoot string, env *v1alpha1.Env, q *apiclie
return nil, "", fmt.Errorf("error getting helm repos: %w", err)
}
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials)
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
if err != nil {
return nil, "", fmt.Errorf("error initializing helm app object: %w", err)
}
@@ -2256,7 +2256,7 @@ func populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse, appPath strin
if err != nil {
return err
}
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials)
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
if err != nil {
return err
}

View File

@@ -323,8 +323,6 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
appsetLister := appFactory.Argoproj().V1alpha1().ApplicationSets().Lister()
userStateStorage := util_session.NewUserStateStorage(opts.RedisClient)
ssoClientApp, err := oidc.NewClientApp(settings, opts.DexServerAddr, opts.DexTLSConfig, opts.BaseHRef, cacheutil.NewRedisCache(opts.RedisClient, settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
errorsutil.CheckError(err)
sessionMgr := util_session.NewSessionManager(settingsMgr, projLister, opts.DexServerAddr, opts.DexTLSConfig, userStateStorage)
enf := rbac.NewEnforcer(opts.KubeClientset, opts.Namespace, common.ArgoCDRBACConfigMapName, nil)
enf.EnableEnforce(!opts.DisableAuth)
@@ -372,7 +370,6 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
a := &ArgoCDServer{
ArgoCDServerOpts: opts,
ApplicationSetOpts: appsetOpts,
ssoClientApp: ssoClientApp,
log: logger,
settings: settings,
sessionMgr: sessionMgr,
@@ -573,6 +570,10 @@ func (server *ArgoCDServer) Run(ctx context.Context, listeners *Listeners) {
if server.RedisClient != nil {
cacheutil.CollectMetrics(server.RedisClient, metricsServ, server.userStateStorage.GetLockObject())
}
// OIDC config needs to be refreshed at each server restart
ssoClientApp, err := oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
errorsutil.CheckError(err)
server.ssoClientApp = ssoClientApp
// Don't init storage until after CollectMetrics. CollectMetrics adds hooks to the Redis client, and Init
// reads those hooks. If this is called first, there may be a data race.

View File

@@ -484,6 +484,98 @@ func TestGracefulShutdown(t *testing.T) {
assert.True(t, shutdown)
}
func TestOIDCRefresh(t *testing.T) {
port, err := test.GetFreePort()
require.NoError(t, err)
mockRepoClient := &mocks.Clientset{RepoServerServiceClient: &mocks.RepoServerServiceClient{}}
cm := test.NewFakeConfigMap()
cm.Data["oidc.config"] = `
name: Test OIDC
issuer: $oidc.myoidc.issuer
clientID: $oidc.myoidc.clientId
clientSecret: $oidc.myoidc.clientSecret
`
secret := test.NewFakeSecret()
issuerURL := "http://oidc.127.0.0.1.nip.io"
updatedIssuerURL := "http://newoidc.127.0.0.1.nip.io"
secret.Data["oidc.myoidc.issuer"] = []byte(issuerURL)
secret.Data["oidc.myoidc.clientId"] = []byte("myClientId")
secret.Data["oidc.myoidc.clientSecret"] = []byte("myClientSecret")
kubeclientset := fake.NewSimpleClientset(cm, secret)
redis, redisCloser := test.NewInMemoryRedis()
defer redisCloser()
s := NewServer(
t.Context(),
ArgoCDServerOpts{
ListenPort: port,
Namespace: test.FakeArgoCDNamespace,
KubeClientset: kubeclientset,
AppClientset: apps.NewSimpleClientset(),
RepoClientset: mockRepoClient,
RedisClient: redis,
},
ApplicationSetOpts{},
)
projInformerCancel := test.StartInformer(s.projInformer)
defer projInformerCancel()
appInformerCancel := test.StartInformer(s.appInformer)
defer appInformerCancel()
appsetInformerCancel := test.StartInformer(s.appsetInformer)
defer appsetInformerCancel()
shutdown := false
lns, err := s.Listen()
require.NoError(t, err)
runCtx := t.Context()
var wg gosync.WaitGroup
wg.Add(1)
go func(shutdown *bool) {
defer wg.Done()
s.Run(runCtx, lns)
*shutdown = true
}(&shutdown)
for !s.available.Load() {
time.Sleep(10 * time.Millisecond)
}
assert.True(t, s.available.Load())
assert.Equal(t, issuerURL, s.ssoClientApp.IssuerURL())
// Update oidc config
secret.Data["oidc.myoidc.issuer"] = []byte(updatedIssuerURL)
secret.ResourceVersion = "12345"
_, err = kubeclientset.CoreV1().Secrets(test.FakeArgoCDNamespace).Update(runCtx, secret, metav1.UpdateOptions{})
require.NoError(t, err)
// Wait for graceful shutdown
wg.Wait()
for s.available.Load() {
time.Sleep(10 * time.Millisecond)
}
assert.False(t, s.available.Load())
shutdown = false
wg.Add(1)
go func(shutdown *bool) {
defer wg.Done()
s.Run(runCtx, lns)
*shutdown = true
}(&shutdown)
for !s.available.Load() {
time.Sleep(10 * time.Millisecond)
}
assert.True(t, s.available.Load())
assert.Equal(t, updatedIssuerURL, s.ssoClientApp.IssuerURL())
s.stopCh <- syscall.SIGINT
wg.Wait()
}
func TestAuthenticate(t *testing.T) {
type testData struct {
test string

View File

@@ -1049,39 +1049,51 @@ type ClusterGetter interface {
GetClusterServersByName(ctx context.Context, server string) ([]string, error)
}
// GetDestinationServer resolves the cluster server URL for the given destination without
// fetching the full Cluster object. For server based destinations the URL is returned
// directly (normalized). For name based destinations GetClusterServersByName is called.
// An error is returned if the name is ambiguous or missing.
func GetDestinationServer(ctx context.Context, destination argoappv1.ApplicationDestination, db ClusterGetter) (string, error) {
if destination.Name != "" && destination.Server != "" {
return "", fmt.Errorf("application destination can't have both name and server defined: %s %s", destination.Name, destination.Server)
}
if destination.Server != "" {
return strings.TrimRight(destination.Server, "/"), nil
}
if destination.Name != "" {
clusterURLs, err := db.GetClusterServersByName(ctx, destination.Name)
if err != nil {
return "", fmt.Errorf("error getting cluster by name %q: %w", destination.Name, err)
}
if len(clusterURLs) == 0 {
return "", fmt.Errorf("there are no clusters with this name: %s", destination.Name)
}
if len(clusterURLs) > 1 {
return "", fmt.Errorf("there are %d clusters with the same name: [%s]", len(clusterURLs), strings.Join(clusterURLs, " "))
}
return clusterURLs[0], nil
}
// nolint:staticcheck // Error constant is very old, shouldn't lowercase the first letter.
return "", errors.New(ErrDestinationMissing)
}
// GetDestinationCluster returns the cluster object based on the destination server or name. If both are provided or
// both are empty, an error is returned. If the destination server is provided, the cluster is fetched by the server
// URL. If the destination name is provided, the cluster is fetched by the name. If multiple clusters have the specified
// name, an error is returned.
func GetDestinationCluster(ctx context.Context, destination argoappv1.ApplicationDestination, db ClusterGetter) (*argoappv1.Cluster, error) {
if destination.Name != "" && destination.Server != "" {
return nil, fmt.Errorf("application destination can't have both name and server defined: %s %s", destination.Name, destination.Server)
server, err := GetDestinationServer(ctx, destination, db)
if err != nil {
return nil, err
}
if destination.Server != "" {
cluster, err := db.GetCluster(ctx, destination.Server)
if err != nil {
cluster, err := db.GetCluster(ctx, server)
if err != nil {
if destination.Server != "" {
return nil, fmt.Errorf("error getting cluster by server %q: %w", destination.Server, err)
}
return cluster, nil
} else if destination.Name != "" {
clusterURLs, err := db.GetClusterServersByName(ctx, destination.Name)
if err != nil {
return nil, fmt.Errorf("error getting cluster by name %q: %w", destination.Name, err)
}
if len(clusterURLs) == 0 {
return nil, fmt.Errorf("there are no clusters with this name: %s", destination.Name)
}
if len(clusterURLs) > 1 {
return nil, fmt.Errorf("there are %d clusters with the same name: [%s]", len(clusterURLs), strings.Join(clusterURLs, " "))
}
cluster, err := db.GetCluster(ctx, clusterURLs[0])
if err != nil {
return nil, fmt.Errorf("error getting cluster by URL: %w", err)
}
return cluster, nil
return nil, fmt.Errorf("error getting cluster by URL: %w", err)
}
// nolint:staticcheck // Error constant is very old, shouldn't lowercase the first letter.
return nil, errors.New(ErrDestinationMissing)
return cluster, nil
}
func GetGlobalProjects(proj *argoappv1.AppProject, projLister applicationsv1.AppProjectLister, settingsManager *settings.SettingsManager) []*argoappv1.AppProject {

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"maps"
"slices"
"strconv"
"strings"
"sync"
@@ -25,9 +26,11 @@ import (
"github.com/argoproj/argo-cd/v3/util/settings"
)
const errCheckingInClusterEnabled = "failed to check in-cluster enabled in %s: %w"
var (
localCluster = appv1.Cluster{
Name: "in-cluster",
Name: appv1.KubernetesInClusterName,
Server: appv1.KubernetesInternalAPIServerAddr,
ConnectionState: appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful},
}
@@ -234,7 +237,10 @@ func (db *db) getClusterSecret(server string) (*corev1.Secret, error) {
// GetCluster returns a cluster from a query
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
informer := db.settingsMgr.GetClusterInformer()
informer, err := db.settingsMgr.GetClusterInformer()
if err != nil {
return nil, fmt.Errorf("failed to get cluster informer: %w", err)
}
if server == appv1.KubernetesInternalAPIServerAddr {
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
@@ -285,24 +291,32 @@ func (db *db) GetProjectClusters(_ context.Context, project string) ([]*appv1.Cl
}
func (db *db) GetClusterServersByName(_ context.Context, name string) ([]string, error) {
argoSettings, err := db.settingsMgr.GetSettings()
informer, err := db.settingsMgr.GetClusterInformer()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get cluster informer: %w", err)
}
informer := db.settingsMgr.GetClusterInformer()
servers, err := informer.GetClusterServersByName(name)
if err != nil {
return nil, err
}
// attempt to short circuit if the in-cluster name is not involved
if name != appv1.KubernetesInClusterName && !slices.Contains(servers, appv1.KubernetesInternalAPIServerAddr) {
return servers, nil
}
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
return nil, fmt.Errorf(errCheckingInClusterEnabled, "GetClusterServersByName", err)
}
// Handle local cluster special case
if len(servers) == 0 && name == "in-cluster" && argoSettings.InClusterEnabled {
if len(servers) == 0 && name == appv1.KubernetesInClusterName && inClusterEnabled {
return []string{appv1.KubernetesInternalAPIServerAddr}, nil
}
// Filter out disabled in-cluster
if !argoSettings.InClusterEnabled {
if !inClusterEnabled {
filtered := make([]string, 0, len(servers))
for _, s := range servers {
if s != appv1.KubernetesInternalAPIServerAddr {

View File

@@ -661,6 +661,70 @@ func TestGetClusterServersByName(t *testing.T) {
})
}
func TestGetClusterServersByName_IsInClusterEnabledLazyLoad(t *testing.T) {
argoCDSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string][]byte{
"admin.password": nil,
"server.secretkey": nil,
},
}
prodSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-secret",
Namespace: fakeNamespace,
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte("prod"),
"server": []byte("https://prod.example.com"),
"config": []byte("{}"),
},
}
tests := []struct {
name string
clusterName string
wantErr bool
wantServers []string
}{
{
name: "non in-cluster name does not call IsInClusterEnabled()",
clusterName: "prod",
wantErr: false,
wantServers: []string{"https://prod.example.com"},
},
{
name: "in-cluster name calls IsInClusterEnabled()",
clusterName: "in-cluster",
wantErr: true,
},
}
// argocd-cm is intentionally absent: IsInClusterEnabled() fails if called.
kubeclientset := fake.NewClientset(argoCDSecret, prodSecret)
db := NewDB(fakeNamespace, settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace), kubeclientset)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
servers, err := db.GetClusterServersByName(t.Context(), tt.clusterName)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
assert.ElementsMatch(t, tt.wantServers, servers)
}
})
}
}
// TestClusterRaceConditionClusterSecrets reproduces a race condition
// on the cluster secrets. The test isn't asserting anything because
// before the fix it would cause a panic from concurrent map iteration and map write

View File

@@ -1041,7 +1041,7 @@ func (m *nativeGitClient) CheckoutOrOrphan(branch string, submoduleEnabled bool)
}
// Make an empty initial commit.
out, err = m.runCmd(ctx, "commit", "--allow-empty", "-m", "Initial commit")
out, err = m.runCmd(ctx, "commit", "--allow-empty", "-m", "Initial commit for "+branch)
if err != nil {
return out, fmt.Errorf("failed to commit initial commit: %w", err)
}

View File

@@ -697,6 +697,11 @@ func Test_nativeGitClient_CheckoutOrOrphan(t *testing.T) {
currentCommitHash := strings.TrimSpace(string(gitCurrentCommitHash))
require.NotEqual(t, baseCommitHash, currentCommitHash)
gitCurrentCommitMessage, err := outputCmd(ctx, tempDir, "git", "log", "--format=%B", "-n", "1", "HEAD")
require.NoError(t, err)
currentCommitMessage := strings.TrimSpace(string(gitCurrentCommitMessage))
require.Contains(t, currentCommitMessage, expectedBranch)
// get commit count on current branch, verify 1 -> orphan
gitCommitCount, err := outputCmd(ctx, tempDir, "git", "rev-list", "--count", actualBranch)
require.NoError(t, err)

View File

@@ -327,8 +327,12 @@ func (c *Cmd) PullOCI(repo string, chart string, version string, destination str
return out, nil
}
func (c *Cmd) dependencyBuild() (string, error) {
out, _, err := c.run(context.Background(), "dependency", "build")
func (c *Cmd) dependencyBuild(insecure bool) (string, error) {
args := []string{"dependency", "build"}
if insecure {
args = append(args, "--insecure-skip-tls-verify")
}
out, _, err := c.run(context.Background(), args...)
if err != nil {
return "", fmt.Errorf("failed to build dependencies: %w", err)
}

View File

@@ -135,6 +135,36 @@ func TestRegistryLogin(t *testing.T) {
}
}
func TestDependencyBuild(t *testing.T) {
tests := []struct {
name string
insecure bool
expectedOut string
}{
{
name: "without insecure",
insecure: false,
expectedOut: "helm dependency build",
},
{
name: "with insecure",
insecure: true,
expectedOut: "helm dependency build --insecure-skip-tls-verify",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
c, err := newCmdWithVersion(".", false, "", "", func(cmd *exec.Cmd, _ func(_ string) string) (string, error) {
return strings.Join(cmd.Args, " "), nil
})
require.NoError(t, err)
out, err := c.dependencyBuild(tc.insecure)
require.NoError(t, err)
assert.Equal(t, tc.expectedOut, out)
})
}
}
func TestRegistryLogout(t *testing.T) {
tests := []struct {
name string

View File

@@ -43,20 +43,21 @@ type Helm interface {
}
// NewHelmApp create a new wrapper to run commands on the `helm` command-line tool.
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool) (Helm, error) {
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool, insecure bool) (Helm, error) {
cmd, err := NewCmd(workDir, version, proxy, noProxy)
if err != nil {
return nil, fmt.Errorf("failed to create new helm command: %w", err)
}
cmd.IsLocal = isLocal
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials}, nil
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials, insecure: insecure}, nil
}
type helm struct {
cmd Cmd
repos []HelmRepository
passCredentials bool
insecure bool
}
var _ Helm = &helm{}
@@ -108,7 +109,7 @@ func (h *helm) DependencyBuild() error {
}
}
h.repos = nil
_, err := h.cmd.dependencyBuild()
_, err := h.cmd.dependencyBuild(h.insecure)
if err != nil {
return fmt.Errorf("failed to build helm dependencies: %w", err)
}

View File

@@ -25,7 +25,7 @@ func template(h Helm, opts *TemplateOpts) ([]*unstructured.Unstructured, error)
}
func TestHelmTemplateParams(t *testing.T) {
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false)
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false, false)
require.NoError(t, err)
opts := TemplateOpts{
Name: "test",
@@ -58,7 +58,7 @@ func TestHelmTemplateValues(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false, false)
require.NoError(t, err)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHelmGetParams(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
require.NoError(t, err)
params, err := h.GetParameters(nil, repoRootAbs, repoRootAbs)
require.NoError(t, err)
@@ -97,7 +97,7 @@ func TestHelmGetParamsValueFiles(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
require.NoError(t, err)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
@@ -112,7 +112,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
require.NoError(t, err)
valuesMissingPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-missing.yaml", nil)
require.NoError(t, err)
@@ -126,7 +126,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
}
func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{Name: "my-release"})
@@ -144,7 +144,7 @@ func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
}
func TestHelmTemplateReleaseName(t *testing.T) {
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{Name: "test"})
require.NoError(t, err)
@@ -206,7 +206,7 @@ func Test_flatVals(t *testing.T) {
}
func TestAPIVersions(t *testing.T) {
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{})
@@ -221,7 +221,7 @@ func TestAPIVersions(t *testing.T) {
}
func TestKubeVersionWithSymbol(t *testing.T) {
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{KubeVersion: "1.30.11+IKS"})
@@ -244,7 +244,7 @@ func TestKubeVersionWithSymbol(t *testing.T) {
}
func TestSkipCrds(t *testing.T) {
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{SkipCrds: false})
@@ -261,7 +261,7 @@ func TestSkipCrds(t *testing.T) {
}
func TestSkipTests(t *testing.T) {
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{SkipTests: false})

View File

@@ -983,3 +983,7 @@ func FormatAccessTokenCacheKey(sub string) string {
func formatOidcTokenCacheKey(sub string, sid string) string {
return fmt.Sprintf("%s_%s_%s", OidcTokenCachePrefix, sub, sid)
}
// IssuerURL returns the issuer URL this ClientApp was configured with.
func (a *ClientApp) IssuerURL() string {
return a.issuerURL
}

View File

@@ -657,10 +657,20 @@ func (mgr *SettingsManager) GetSecretsInformer() (cache.SharedIndexInformer, err
}
// GetClusterInformer returns the cluster cache for optimized cluster lookups.
func (mgr *SettingsManager) GetClusterInformer() *ClusterInformer {
// Ensure the settings manager is initialized
_ = mgr.ensureSynced(false)
return mgr.clusterInformer
func (mgr *SettingsManager) GetClusterInformer() (*ClusterInformer, error) {
if err := mgr.ensureSynced(false); err != nil {
return nil, fmt.Errorf("error ensuring that the settings manager is synced: %w", err)
}
return mgr.clusterInformer, nil
}
// IsInClusterEnabled reports whether the in-cluster server address is enabled
// in the current Argo CD settings. It returns an error if the settings cannot
// be loaded.
func (mgr *SettingsManager) IsInClusterEnabled() (bool, error) {
	settings, err := mgr.GetSettings()
	if err != nil {
		return false, err
	}
	return settings.InClusterEnabled, nil
}
func (mgr *SettingsManager) updateSecret(callback func(*corev1.Secret) error) error {

View File

@@ -11,20 +11,29 @@ import (
// filename of ReDoc script in UI's assets/scripts path
const redocScriptName = "redoc.standalone.js"
// withFrameOptions wraps an http.Handler to set headers that prevent iframe embedding (clickjacking protection).
func withFrameOptions(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Frame-Options", "DENY")
w.Header().Set("Content-Security-Policy", "frame-ancestors 'none'")
h.ServeHTTP(w, r)
})
}
// ServeSwaggerUI serves the Swagger UI and JSON spec.
func ServeSwaggerUI(mux *http.ServeMux, swaggerJSON string, uiPath string, rootPath string) {
prefix := path.Dir(uiPath)
swaggerPath := path.Join(prefix, "swagger.json")
mux.HandleFunc(swaggerPath, func(w http.ResponseWriter, _ *http.Request) {
mux.Handle(swaggerPath, withFrameOptions(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
_, _ = fmt.Fprint(w, swaggerJSON)
})
})))
specURL := path.Join(prefix, rootPath, "swagger.json")
scriptURL := path.Join(prefix, rootPath, "assets", "scripts", redocScriptName)
mux.Handle(uiPath, middleware.Redoc(middleware.RedocOpts{
mux.Handle(uiPath, withFrameOptions(middleware.Redoc(middleware.RedocOpts{
BasePath: prefix,
SpecURL: specURL,
Path: path.Base(uiPath),
RedocURL: scriptURL,
}, http.NotFoundHandler()))
}, http.NotFoundHandler())))
}

View File

@@ -52,4 +52,19 @@ func TestSwaggerUI(t *testing.T) {
require.NoError(t, err)
require.Equalf(t, http.StatusOK, resp.StatusCode, "Was expecting status code 200 from swagger-ui, but got %d instead", resp.StatusCode)
require.NoError(t, resp.Body.Close())
// Verify clickjacking protection headers on swagger.json
require.Equal(t, "DENY", resp.Header.Get("X-Frame-Options"))
require.Equal(t, "frame-ancestors 'none'", resp.Header.Get("Content-Security-Policy"))
// Verify clickjacking protection headers on swagger-ui
uiReq, err := http.NewRequestWithContext(t.Context(), http.MethodGet, server+"/swagger-ui", http.NoBody)
require.NoError(t, err)
uiResp, err := http.DefaultClient.Do(uiReq)
require.NoError(t, err)
require.Equalf(t, http.StatusOK, uiResp.StatusCode, "Was expecting status code 200 from swagger-ui, but got %d instead", uiResp.StatusCode)
require.Equal(t, "DENY", uiResp.Header.Get("X-Frame-Options"))
require.Equal(t, "frame-ancestors 'none'", uiResp.Header.Get("Content-Security-Policy"))
require.NoError(t, uiResp.Body.Close())
}