Compare commits

..

2 Commits

Author SHA1 Message Date
argo-cd-cherry-pick-bot[bot]
e53c10caec fix(UI): show RollingSync step clearly when labels match no step (cherry-pick #26877 for 3.3) (#26884)
Signed-off-by: Atif Ali <atali@redhat.com>
Co-authored-by: Atif Ali <atali@redhat.com>
2026-03-17 21:43:06 -04:00
argo-cd-cherry-pick-bot[bot]
422cabb648 fix: stack overflow when processing circular ownerrefs in resource graph (#26783) (cherry-pick #26790 for 3.3) (#26879)
Signed-off-by: Jonathan Ogilvie <jonathan.ogilvie@sumologic.com>
Signed-off-by: Jonathan Ogilvie <679297+jcogilvie@users.noreply.github.com>
Co-authored-by: Jonathan Ogilvie <679297+jcogilvie@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-17 22:55:34 +01:00
5 changed files with 180 additions and 37 deletions

View File

@@ -92,6 +92,15 @@ const (
RespectRbacStrict
)
// callState tracks whether action() has been called on a resource during hierarchy iteration.
// It is the value type of the actionCallState map shared across the hierarchy-traversal
// helpers; the three-state lifecycle lets traversal distinguish a cycle (a key that is
// still inProgress on the call stack) from work that has already finished (completed).
type callState int

const (
	notCalled  callState = iota // action() has not been called yet
	inProgress                  // action() is currently being processed (in call stack)
	completed                   // action() has been called and processing is complete
)
type apiMeta struct {
namespaced bool
// watchCancel stops the watch of all resources for this API. This gets called when the cache is invalidated or when
@@ -1186,8 +1195,11 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
c.lock.RLock()
defer c.lock.RUnlock()
// Track visited resources to avoid cycles
visited := make(map[kube.ResourceKey]int)
// Track whether action() has been called on each resource (notCalled/inProgress/completed).
// This is shared across processNamespaceHierarchy and processCrossNamespaceChildren.
// Note: This is distinct from 'crossNSTraversed' in processCrossNamespaceChildren, which tracks
// whether we've traversed a cluster-scoped key's cross-namespace children.
actionCallState := make(map[kube.ResourceKey]callState)
// Group keys by namespace for efficient processing
keysPerNamespace := make(map[string][]kube.ResourceKey)
@@ -1203,12 +1215,18 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
for namespace, namespaceKeys := range keysPerNamespace {
nsNodes := c.nsIndex[namespace]
graph := buildGraph(nsNodes)
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, visited, action)
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, actionCallState, action)
}
// Process pre-computed cross-namespace children
if clusterKeys, ok := keysPerNamespace[""]; ok {
c.processCrossNamespaceChildren(clusterKeys, visited, action)
// Track which cluster-scoped keys have had their cross-namespace children traversed.
// This is distinct from 'actionCallState' - a resource may have had action() called
// (i.e., its actionCallState is in the completed state) but not yet had its cross-namespace
// children traversed. This prevents infinite recursion when resources have circular
// ownerReferences.
crossNSTraversed := make(map[kube.ResourceKey]bool)
c.processCrossNamespaceChildren(clusterKeys, actionCallState, crossNSTraversed, action)
}
}
@@ -1216,12 +1234,21 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries.
// It also handles multi-level hierarchies where cluster-scoped resources own other cluster-scoped resources
// that in turn own namespaced resources (e.g., Provider -> ProviderRevision -> Deployment in Crossplane).
// The crossNSTraversed map tracks which keys have already been processed to prevent infinite recursion
// from circular ownerReferences (e.g., a resource that owns itself).
func (c *clusterCache) processCrossNamespaceChildren(
clusterScopedKeys []kube.ResourceKey,
visited map[kube.ResourceKey]int,
actionCallState map[kube.ResourceKey]callState,
crossNSTraversed map[kube.ResourceKey]bool,
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
for _, clusterKey := range clusterScopedKeys {
// Skip if already processed (cycle detection)
if crossNSTraversed[clusterKey] {
continue
}
crossNSTraversed[clusterKey] = true
// Get cluster-scoped resource to access its UID
clusterResource := c.resources[clusterKey]
if clusterResource == nil {
@@ -1236,16 +1263,17 @@ func (c *clusterCache) processCrossNamespaceChildren(
continue
}
alreadyVisited := visited[childKey] != 0
alreadyProcessed := actionCallState[childKey] != notCalled
// If child is cluster-scoped and was already visited by processNamespaceHierarchy,
// If child is cluster-scoped and action() was already called by processNamespaceHierarchy,
// we still need to recursively check for its cross-namespace children.
// This handles multi-level hierarchies like: ClusterScoped -> ClusterScoped -> Namespaced
// (e.g., Crossplane's Provider -> ProviderRevision -> Deployment)
if alreadyVisited {
if alreadyProcessed {
if childKey.Namespace == "" {
// Recursively process cross-namespace children of this cluster-scoped child
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
// The crossNSTraversed map prevents infinite recursion on circular ownerReferences
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, actionCallState, crossNSTraversed, action)
}
continue
}
@@ -1258,16 +1286,16 @@ func (c *clusterCache) processCrossNamespaceChildren(
// Process this child
if action(child, nsNodes) {
visited[childKey] = 1
actionCallState[childKey] = inProgress
// Recursively process descendants using index-based traversal
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
c.iterateChildrenUsingIndex(child, nsNodes, actionCallState, action)
// If this child is also cluster-scoped, recursively process its cross-namespace children
if childKey.Namespace == "" {
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, actionCallState, crossNSTraversed, action)
}
visited[childKey] = 2
actionCallState[childKey] = completed
}
}
}
@@ -1278,14 +1306,14 @@ func (c *clusterCache) processCrossNamespaceChildren(
func (c *clusterCache) iterateChildrenUsingIndex(
parent *Resource,
nsNodes map[kube.ResourceKey]*Resource,
visited map[kube.ResourceKey]int,
actionCallState map[kube.ResourceKey]callState,
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
// Look up direct children of this parent using the index
childKeys := c.parentUIDToChildren[parent.Ref.UID]
for _, childKey := range childKeys {
if visited[childKey] != 0 {
continue // Already visited or in progress
if actionCallState[childKey] != notCalled {
continue // action() already called or in progress
}
child := c.resources[childKey]
@@ -1300,10 +1328,10 @@ func (c *clusterCache) iterateChildrenUsingIndex(
}
if action(child, nsNodes) {
visited[childKey] = 1
actionCallState[childKey] = inProgress
// Recursively process this child's descendants
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
visited[childKey] = 2
c.iterateChildrenUsingIndex(child, nsNodes, actionCallState, action)
actionCallState[childKey] = completed
}
}
}
@@ -1313,22 +1341,19 @@ func (c *clusterCache) processNamespaceHierarchy(
namespaceKeys []kube.ResourceKey,
nsNodes map[kube.ResourceKey]*Resource,
graph map[kube.ResourceKey]map[types.UID]*Resource,
visited map[kube.ResourceKey]int,
actionCallState map[kube.ResourceKey]callState,
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
for _, key := range namespaceKeys {
visited[key] = 0
}
for _, key := range namespaceKeys {
res := c.resources[key]
if visited[key] == 2 || !action(res, nsNodes) {
if actionCallState[key] == completed || !action(res, nsNodes) {
continue
}
visited[key] = 1
actionCallState[key] = inProgress
if _, ok := graph[key]; ok {
for _, child := range graph[key] {
if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
if actionCallState[child.ResourceKey()] == notCalled && action(child, nsNodes) {
child.iterateChildrenV2(graph, nsNodes, actionCallState, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
if err != nil {
c.log.V(2).Info(err.Error())
return false
@@ -1338,7 +1363,7 @@ func (c *clusterCache) processNamespaceHierarchy(
}
}
}
visited[key] = 2
actionCallState[key] = completed
}
}

View File

@@ -2189,3 +2189,112 @@ func TestIterateHierarchyV2_NoDuplicatesCrossNamespace(t *testing.T) {
assert.Equal(t, 1, visitCount["namespaced-child"], "namespaced child should be visited once")
assert.Equal(t, 1, visitCount["cluster-child"], "cluster child should be visited once")
}
func TestIterateHierarchyV2_CircularOwnerReference_NoStackOverflow(t *testing.T) {
	// Regression test for https://github.com/argoproj/argo-cd/issues/26783:
	// a resource whose ownerReference points back at itself must not drive
	// IterateHierarchyV2 into unbounded recursion (stack overflow).

	// Cluster-scoped object that lists itself as its own owner.
	selfOwned := &corev1.Namespace{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "self-referencing",
			UID:             "self-ref-uid",
			ResourceVersion: "1",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "self-referencing",
				UID:        "self-ref-uid", // owner UID equals own UID — a self-cycle
			}},
		},
	}

	cluster := newCluster(t, selfOwned).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	require.NoError(t, cluster.EnsureSynced())

	visits := 0
	// Iteration must terminate; the action callback counts how often it fires.
	cluster.IterateHierarchyV2(
		[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(selfOwned))},
		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
			visits++
			return true
		},
	)

	assert.Equal(t, 1, visits, "self-referencing resource should be visited exactly once")
}
func TestIterateHierarchyV2_CircularOwnerChain_NoStackOverflow(t *testing.T) {
	// Two cluster-scoped resources that own each other (A -> B -> A) form a
	// circular ownership chain; hierarchy iteration must terminate rather than
	// recurse forever.

	nsA := &corev1.Namespace{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "resource-a",
			UID:             "uid-a",
			ResourceVersion: "1",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "resource-b",
				UID:        "uid-b", // A is owned by B
			}},
		},
	}
	nsB := &corev1.Namespace{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "resource-b",
			UID:             "uid-b",
			ResourceVersion: "1",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "resource-a",
				UID:        "uid-a", // B is owned by A, closing the cycle
			}},
		},
	}

	cluster := newCluster(t, nsA, nsB).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	require.NoError(t, cluster.EnsureSynced())

	visits := make(map[string]int)
	// Iteration must terminate; count visits per resource name.
	cluster.IterateHierarchyV2(
		[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(nsA))},
		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
			visits[resource.Ref.Name]++
			return true
		},
	)

	assert.Equal(t, 1, visits["resource-a"], "resource-a should be visited exactly once")
	assert.Equal(t, 1, visits["resource-b"], "resource-b should be visited exactly once")
}

View File

@@ -76,16 +76,16 @@ func (r *Resource) toOwnerRef() metav1.OwnerReference {
}
// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, actionCallState map[kube.ResourceKey]callState, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
key := r.ResourceKey()
if visited[key] == 2 {
if actionCallState[key] == completed {
return
}
// this indicates that we've started processing this node's children
visited[key] = 1
actionCallState[key] = inProgress
defer func() {
// this indicates that we've finished processing this node's children
visited[key] = 2
actionCallState[key] = completed
}()
children, ok := graph[key]
if !ok || children == nil {
@@ -94,13 +94,13 @@ func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*R
for _, child := range children {
childKey := child.ResourceKey()
// For cross-namespace relationships, child might not be in ns, so use it directly from graph
switch visited[childKey] {
case 1:
switch actionCallState[childKey] {
case inProgress:
// Since we encountered a node that we're currently processing, we know we have a circular dependency.
_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
case 0:
case notCalled:
if action(nil, child, ns) {
child.iterateChildrenV2(graph, ns, visited, action)
child.iterateChildrenV2(graph, ns, actionCallState, action)
}
}
}

View File

@@ -8,6 +8,7 @@ import {services} from '../../../shared/services';
import {
ApplicationSyncWindowStatusIcon,
ComparisonStatusIcon,
formatApplicationSetProgressiveSyncStep,
getAppDefaultSource,
getAppDefaultSyncRevisionExtra,
getAppOperationState,
@@ -134,7 +135,7 @@ const ProgressiveSyncStatus = ({application}: {application: models.Application})
<div className='application-status-panel__item-value' style={{color: getProgressiveSyncStatusColor(appResource.status)}}>
{getProgressiveSyncStatusIcon({status: appResource.status})}&nbsp;{appResource.status}
</div>
{appResource?.step && <div className='application-status-panel__item-value'>Wave: {appResource.step}</div>}
{appResource?.step !== undefined && <div className='application-status-panel__item-value'>{formatApplicationSetProgressiveSyncStep(appResource.step)}</div>}
{lastTransitionTime && (
<div className='application-status-panel__item-name' style={{marginBottom: '0.5em'}}>
Last Transition: <br />

View File

@@ -1803,6 +1803,14 @@ export function getAppUrl(app: appModels.AbstractApplication): string {
return `${basePath}/${app.metadata.namespace}/${app.metadata.name}`;
}
/** RollingSync step for display; backend uses -1 when no step matches the app's labels. */
export function formatApplicationSetProgressiveSyncStep(step: string | undefined): string {
    // '-1' is the backend sentinel meaning no RollingSync step matched this app's labels.
    return step === '-1' ? 'Step: unmatched label' : `Step: ${step ?? ''}`;
}
export const getProgressiveSyncStatusIcon = ({status, isButton}: {status: string; isButton?: boolean}) => {
const getIconProps = () => {
switch (status) {