Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 01:28:45 +01:00)
feat: cross namespace hierarchy traversal from cluster-scoped parents to namespaced children (fixes #24379) (#24847)
Signed-off-by: Jonathan Ogilvie <jonathan.ogilvie@sumologic.com>
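For orientation (not part of the diff below), here is a minimal, hypothetical sketch in Go of the ownership shape this change targets: a namespaced child whose ownerReference names a cluster-scoped parent. Owner references never carry a namespace, which is why the cache must infer whether a parent is cluster-scoped or lives in the child's namespace. All names and UIDs here are made up.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative object only: a namespaced ConfigMap owned by a cluster-scoped ClusterRole.
	child := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-child",
			Namespace: "team-a",
			OwnerReferences: []metav1.OwnerReference{{
				// The parent is cluster-scoped: the reference has no namespace field,
				// only APIVersion, Kind, Name, and UID.
				APIVersion: "rbac.authorization.k8s.io/v1",
				Kind:       "ClusterRole",
				Name:       "example-parent",
				UID:        "1b4e28ba-2fa1-11ec-8d3d-0242ac130003",
			}},
		},
	}
	fmt.Printf("%s/%s is owned by cluster-scoped %s %s\n",
		child.Namespace, child.Name,
		child.OwnerReferences[0].Kind, child.OwnerReferences[0].Name)
}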
@@ -609,6 +609,8 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
managedResourcesKeys = append(managedResourcesKeys, kube.GetResourceKey(live))
}
}
// Process managed resources and their children, including cross-namespace relationships
// from cluster-scoped parents (e.g., Crossplane CompositeResourceDefinitions)
err = ctrl.stateCache.IterateHierarchyV2(destCluster, managedResourcesKeys, func(child appv1.ResourceNode, _ string) bool {
permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.Group, Kind: child.Kind}, child.Name, child.Namespace, destCluster, func(project string) ([]*appv1.Cluster, error) {
clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project)
@@ -634,6 +636,7 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
orphanedNodesKeys = append(orphanedNodesKeys, k)
}
}
// Process orphaned resources
err = ctrl.stateCache.IterateHierarchyV2(destCluster, orphanedNodesKeys, func(child appv1.ResourceNode, appName string) bool {
belongToAnotherApp := false
if appName != "" {
|
||||
controller/cache/cache.go (vendored, 42 changed lines)
@@ -270,7 +270,7 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
|
||||
return &cacheSettings{clusterSettings, appInstanceLabelKey, appv1.TrackingMethod(trackingMethod), installationID, resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
|
||||
}
|
||||
|
||||
func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
|
||||
func asResourceNode(r *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) appv1.ResourceNode {
|
||||
gv, err := schema.ParseGroupVersion(r.Ref.APIVersion)
|
||||
if err != nil {
|
||||
gv = schema.GroupVersion{}
|
||||
@@ -278,14 +278,30 @@ func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
|
||||
parentRefs := make([]appv1.ResourceRef, len(r.OwnerRefs))
|
||||
for i, ownerRef := range r.OwnerRefs {
|
||||
ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
|
||||
parentRefs[i] = appv1.ResourceRef{
|
||||
Group: ownerGvk.Group,
|
||||
Kind: ownerGvk.Kind,
|
||||
Version: ownerGvk.Version,
|
||||
Namespace: r.Ref.Namespace,
|
||||
Name: ownerRef.Name,
|
||||
UID: string(ownerRef.UID),
|
||||
parentRef := appv1.ResourceRef{
|
||||
Group: ownerGvk.Group,
|
||||
Kind: ownerGvk.Kind,
|
||||
Version: ownerGvk.Version,
|
||||
Name: ownerRef.Name,
|
||||
UID: string(ownerRef.UID),
|
||||
}
|
||||
|
||||
// Look up the parent in namespace resources
|
||||
// If found, it's namespaced and we use its namespace
|
||||
// If not found, it must be cluster-scoped (namespace = "")
|
||||
parentKey := kube.NewResourceKey(ownerGvk.Group, ownerGvk.Kind, r.Ref.Namespace, ownerRef.Name)
|
||||
if parent, ok := namespaceResources[parentKey]; ok {
|
||||
parentRef.Namespace = parent.Ref.Namespace
|
||||
} else {
|
||||
// Not in namespace => must be cluster-scoped
|
||||
parentRef.Namespace = ""
|
||||
// Debug logging for cross-namespace relationships
|
||||
if r.Ref.Namespace != "" {
|
||||
log.Debugf("Cross-namespace ref: %s/%s in namespace %s has parent %s/%s (cluster-scoped)",
|
||||
r.Ref.Kind, r.Ref.Name, r.Ref.Namespace, ownerGvk.Kind, ownerRef.Name)
|
||||
}
|
||||
}
|
||||
parentRefs[i] = parentRef
|
||||
}
|
||||
var resHealth *appv1.HealthStatus
|
||||
resourceInfo := resInfo(r)
|
||||
@@ -673,7 +689,7 @@ func (c *liveStateCache) IterateHierarchyV2(server *appv1.Cluster, keys []kube.R
|
||||
return err
|
||||
}
|
||||
clusterInfo.IterateHierarchyV2(keys, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool {
|
||||
return action(asResourceNode(resource), getApp(resource, namespaceResources))
|
||||
return action(asResourceNode(resource, namespaceResources), getApp(resource, namespaceResources))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
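To make the rule above explicit, here is a small standalone sketch (simplified, hypothetical types, not the cache's actual API) of how asResourceNode now decides the parent's namespace: look the owner up among the child's namespace resources; if it is found there, the parent is namespaced, otherwise it is treated as cluster-scoped.

package sketch

// refKey is a simplified stand-in for kube.ResourceKey.
type refKey struct {
	Group, Kind, Namespace, Name string
}

// resolveParentNamespace returns the namespace to record on a parent reference.
// nsResources contains every cached resource in the child's namespace.
func resolveParentNamespace(childNamespace string, owner refKey, nsResources map[refKey]bool) string {
	lookup := refKey{Group: owner.Group, Kind: owner.Kind, Namespace: childNamespace, Name: owner.Name}
	if nsResources[lookup] {
		// The owner exists in the child's namespace, so it is a namespaced parent.
		return childNamespace
	}
	// Not found in the namespace: assume a cluster-scoped parent (empty namespace).
	return ""
}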
@@ -698,9 +714,15 @@ func (c *liveStateCache) GetNamespaceTopLevelResources(server *appv1.Cluster, na
|
||||
return nil, err
|
||||
}
|
||||
resources := clusterInfo.FindResources(namespace, clustercache.TopLevelResource)
|
||||
|
||||
// Get all namespace resources for parent lookups
|
||||
namespaceResources := clusterInfo.FindResources(namespace, func(_ *clustercache.Resource) bool {
|
||||
return true
|
||||
})
|
||||
|
||||
res := make(map[kube.ResourceKey]appv1.ResourceNode)
|
||||
for k, r := range resources {
|
||||
res[k] = asResourceNode(r)
|
||||
res[k] = asResourceNode(r, namespaceResources)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
controller/cache/cache_test.go (vendored, 112 changed lines)
@@ -323,7 +323,7 @@ func Test_asResourceNode_owner_refs(t *testing.T) {
|
||||
CreationTimestamp: nil,
|
||||
Info: nil,
|
||||
Resource: nil,
|
||||
})
|
||||
}, nil)
|
||||
expected := appv1.ResourceNode{
|
||||
ResourceRef: appv1.ResourceRef{
|
||||
Version: "v1",
|
||||
@@ -842,3 +842,113 @@ func Test_ownerRefGV(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_asResourceNode_cross_namespace_parent(t *testing.T) {
|
||||
// Test that a namespaced resource with a cluster-scoped parent
|
||||
// correctly sets the parent namespace to empty string
|
||||
|
||||
// Create a Role (namespaced) with an owner reference to a ClusterRole (cluster-scoped)
|
||||
roleResource := &cache.Resource{
|
||||
Ref: corev1.ObjectReference{
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
Kind: "Role",
|
||||
Namespace: "my-namespace",
|
||||
Name: "my-role",
|
||||
},
|
||||
OwnerRefs: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
Kind: "ClusterRole",
|
||||
Name: "my-cluster-role",
|
||||
UID: "cluster-role-uid",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create namespace resources map (ClusterRole won't be in here since it's cluster-scoped)
|
||||
namespaceResources := map[kube.ResourceKey]*cache.Resource{
|
||||
// Add some other namespace resources but not the ClusterRole
|
||||
{
|
||||
Group: "rbac.authorization.k8s.io",
|
||||
Kind: "Role",
|
||||
Namespace: "my-namespace",
|
||||
Name: "other-role",
|
||||
}: {
|
||||
Ref: corev1.ObjectReference{
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
Kind: "Role",
|
||||
Namespace: "my-namespace",
|
||||
Name: "other-role",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resNode := asResourceNode(roleResource, namespaceResources)
|
||||
|
||||
// The parent reference should have empty namespace since ClusterRole is cluster-scoped
|
||||
assert.Len(t, resNode.ParentRefs, 1)
|
||||
assert.Equal(t, "ClusterRole", resNode.ParentRefs[0].Kind)
|
||||
assert.Equal(t, "my-cluster-role", resNode.ParentRefs[0].Name)
|
||||
assert.Empty(t, resNode.ParentRefs[0].Namespace, "ClusterRole parent should have empty namespace")
|
||||
}
|
||||
|
||||
func Test_asResourceNode_same_namespace_parent(t *testing.T) {
|
||||
// Test that a namespaced resource with a namespaced parent in the same namespace
|
||||
// correctly sets the parent namespace
|
||||
|
||||
// Create a ReplicaSet with an owner reference to a Deployment (both namespaced)
|
||||
rsResource := &cache.Resource{
|
||||
Ref: corev1.ObjectReference{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "ReplicaSet",
|
||||
Namespace: "my-namespace",
|
||||
Name: "my-rs",
|
||||
},
|
||||
OwnerRefs: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: "my-deployment",
|
||||
UID: "deployment-uid",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create namespace resources map with the Deployment
|
||||
deploymentKey := kube.ResourceKey{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
Namespace: "my-namespace",
|
||||
Name: "my-deployment",
|
||||
}
|
||||
namespaceResources := map[kube.ResourceKey]*cache.Resource{
|
||||
deploymentKey: {
|
||||
Ref: corev1.ObjectReference{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Namespace: "my-namespace",
|
||||
Name: "my-deployment",
|
||||
UID: "deployment-uid",
|
||||
},
|
||||
Resource: &unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]any{
|
||||
"name": "my-deployment",
|
||||
"namespace": "my-namespace",
|
||||
"uid": "deployment-uid",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resNode := asResourceNode(rsResource, namespaceResources)
|
||||
|
||||
// The parent reference should have the same namespace
|
||||
assert.Len(t, resNode.ParentRefs, 1)
|
||||
assert.Equal(t, "Deployment", resNode.ParentRefs[0].Kind)
|
||||
assert.Equal(t, "my-deployment", resNode.ParentRefs[0].Name)
|
||||
assert.Equal(t, "my-namespace", resNode.ParentRefs[0].Namespace, "Deployment parent should have same namespace")
|
||||
}
|
||||
|
||||
gitops-engine/pkg/cache/cluster.go (vendored, 291 changed lines)
@@ -1,3 +1,27 @@
// Package cache provides a caching layer for Kubernetes cluster resources with support for
// hierarchical parent-child relationships, including cross-namespace relationships between
// cluster-scoped parents and namespaced children.
//
// The cache maintains:
// - A complete index of all monitored resources in the cluster
// - Hierarchical relationships between resources via owner references
// - Cross-namespace relationships from cluster-scoped resources to namespaced children
// - Efficient traversal of resource hierarchies for dependency analysis
//
// Key features:
// - Watches cluster resources and maintains an in-memory cache synchronized with the cluster state
// - Supports both same-namespace parent-child relationships and cross-namespace relationships
// - Uses pre-computed indexes for efficient hierarchy traversal without full cluster scans
// - Provides configurable namespaces and resource filtering
// - Handles dynamic resource discovery including CRDs
//
// Cross-namespace hierarchy traversal:
// The cache supports discovering namespaced resources that are owned by cluster-scoped resources.
// This is essential for tracking resources like namespaced Deployments owned by cluster-scoped
// custom resources.
//
// The parentUIDToChildren index enables efficient O(1) cross-namespace traversal by mapping
// any resource's UID to its direct children, eliminating the need for O(n) graph building.
package cache

import (
@@ -184,9 +208,10 @@ func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCa
|
||||
eventHandlers: map[uint64]OnEventHandler{},
|
||||
processEventsHandlers: map[uint64]OnProcessEventsHandler{},
|
||||
log: log,
|
||||
listRetryLimit: 1,
|
||||
listRetryUseBackoff: false,
|
||||
listRetryFunc: ListRetryFuncNever,
|
||||
listRetryLimit: 1,
|
||||
listRetryUseBackoff: false,
|
||||
listRetryFunc: ListRetryFuncNever,
|
||||
parentUIDToChildren: make(map[types.UID][]kube.ResourceKey),
|
||||
}
|
||||
for i := range opts {
|
||||
opts[i](cache)
|
||||
@@ -245,6 +270,11 @@ type clusterCache struct {
|
||||
gvkParser *managedfields.GvkParser
|
||||
|
||||
respectRBAC int
|
||||
|
||||
// Parent-to-children index for O(1) hierarchy traversal
|
||||
// Maps any resource's UID to its direct children's ResourceKeys
|
||||
// Eliminates need for O(n) graph building during hierarchy traversal
|
||||
parentUIDToChildren map[types.UID][]kube.ResourceKey
|
||||
}
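As a rough illustration (simplified stand-in types, not the cache's real declarations) of why this index makes child discovery cheap: once every child has been registered under its parent's UID, finding a parent's direct children is a single map lookup, independent of how many namespaces those children span.

package sketch

// uid and resourceKey are simplified stand-ins for types.UID and kube.ResourceKey.
type uid string

type resourceKey struct {
	Group, Kind, Namespace, Name string
}

// directChildren returns the direct children recorded for a parent UID.
// The lookup cost does not depend on the total number of cached resources.
func directChildren(parentUIDToChildren map[uid][]resourceKey, parent uid) []resourceKey {
	return parentUIDToChildren[parent]
}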
|
||||
|
||||
type clusterCacheSync struct {
|
||||
@@ -444,6 +474,10 @@ func (c *clusterCache) newResource(un *unstructured.Unstructured) *Resource {
|
||||
|
||||
func (c *clusterCache) setNode(n *Resource) {
|
||||
key := n.ResourceKey()
|
||||
|
||||
// Keep track of existing resource for index updates
|
||||
existing := c.resources[key]
|
||||
|
||||
c.resources[key] = n
|
||||
ns, ok := c.nsIndex[key.Namespace]
|
||||
if !ok {
|
||||
@@ -452,6 +486,10 @@ func (c *clusterCache) setNode(n *Resource) {
|
||||
}
|
||||
ns[key] = n
|
||||
|
||||
// Update parent-to-children index for all resources with owner refs
|
||||
// This is always done, regardless of sync state, as it's cheap to maintain
|
||||
c.updateParentUIDToChildren(key, existing, n)
|
||||
|
||||
// update inferred parent references
|
||||
if n.isInferredParentOf != nil || mightHaveInferredOwner(n) {
|
||||
for k, v := range ns {
|
||||
@@ -466,6 +504,88 @@ func (c *clusterCache) setNode(n *Resource) {
|
||||
}
|
||||
}
|
||||
|
||||
// rebuildParentToChildrenIndex rebuilds the parent-to-children index after a full sync
|
||||
// This is called after initial sync to ensure all parent-child relationships are tracked
|
||||
func (c *clusterCache) rebuildParentToChildrenIndex() {
|
||||
// Clear existing index
|
||||
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
|
||||
|
||||
// Rebuild parent-to-children index from all resources with owner refs
|
||||
for _, resource := range c.resources {
|
||||
key := resource.ResourceKey()
|
||||
for _, ownerRef := range resource.OwnerRefs {
|
||||
if ownerRef.UID != "" {
|
||||
c.addToParentUIDToChildren(ownerRef.UID, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// addToParentUIDToChildren adds a child to the parent-to-children index
|
||||
func (c *clusterCache) addToParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
|
||||
// Check if child is already in the list to avoid duplicates
|
||||
children := c.parentUIDToChildren[parentUID]
|
||||
for _, existing := range children {
|
||||
if existing == childKey {
|
||||
return // Already exists, no need to add
|
||||
}
|
||||
}
|
||||
c.parentUIDToChildren[parentUID] = append(children, childKey)
|
||||
}
|
||||
|
||||
// removeFromParentUIDToChildren removes a child from the parent-to-children index
|
||||
func (c *clusterCache) removeFromParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
|
||||
children := c.parentUIDToChildren[parentUID]
|
||||
for i, existing := range children {
|
||||
if existing == childKey {
|
||||
// Remove by swapping with last element and truncating
|
||||
children[i] = children[len(children)-1]
|
||||
c.parentUIDToChildren[parentUID] = children[:len(children)-1]
|
||||
|
||||
// Clean up empty entries
|
||||
if len(c.parentUIDToChildren[parentUID]) == 0 {
|
||||
delete(c.parentUIDToChildren, parentUID)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateParentUIDToChildren updates the parent-to-children index when a resource's owner refs change
|
||||
func (c *clusterCache) updateParentUIDToChildren(childKey kube.ResourceKey, oldResource *Resource, newResource *Resource) {
|
||||
// Build sets of old and new parent UIDs
|
||||
oldParents := make(map[types.UID]struct{})
|
||||
if oldResource != nil {
|
||||
for _, ref := range oldResource.OwnerRefs {
|
||||
if ref.UID != "" {
|
||||
oldParents[ref.UID] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newParents := make(map[types.UID]struct{})
|
||||
for _, ref := range newResource.OwnerRefs {
|
||||
if ref.UID != "" {
|
||||
newParents[ref.UID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove from parents that are no longer in owner refs
|
||||
for oldUID := range oldParents {
|
||||
if _, exists := newParents[oldUID]; !exists {
|
||||
c.removeFromParentUIDToChildren(oldUID, childKey)
|
||||
}
|
||||
}
|
||||
|
||||
// Add to parents that are new in owner refs
|
||||
for newUID := range newParents {
|
||||
if _, exists := oldParents[newUID]; !exists {
|
||||
c.addToParentUIDToChildren(newUID, childKey)
|
||||
}
|
||||
}
|
||||
}
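A short worked example of the set-difference logic in updateParentUIDToChildren (hypothetical UIDs): if a child's owner references change from {A, B} to {B, C}, the child must be removed from A's entry and added under C, while B is left untouched. A standalone sketch of that diff computation:

package sketch

// parentDiff mirrors the add/remove decision made by updateParentUIDToChildren:
// it reports which parent UIDs disappeared from, and which were added to, a child's owner refs.
func parentDiff(oldUIDs, newUIDs []string) (removed, added []string) {
	oldSet := make(map[string]bool, len(oldUIDs))
	for _, u := range oldUIDs {
		oldSet[u] = true
	}
	newSet := make(map[string]bool, len(newUIDs))
	for _, u := range newUIDs {
		newSet[u] = true
	}
	for u := range oldSet {
		if !newSet[u] {
			removed = append(removed, u) // e.g. "A": drop the child from A's entry
		}
	}
	for u := range newSet {
		if !oldSet[u] {
			added = append(added, u) // e.g. "C": record the child under C
		}
	}
	return removed, added
}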
|
||||
|
||||
// Invalidate cache and executes callback that optionally might update cache settings
|
||||
func (c *clusterCache) Invalidate(opts ...UpdateSettingsFunc) {
|
||||
c.lock.Lock()
|
||||
@@ -885,6 +1005,7 @@ func (c *clusterCache) sync() error {
|
||||
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
|
||||
c.resources = make(map[kube.ResourceKey]*Resource)
|
||||
c.namespacedResources = make(map[schema.GroupKind]bool)
|
||||
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
|
||||
config := c.config
|
||||
version, err := c.kubectl.GetServerVersion(config)
|
||||
if err != nil {
|
||||
@@ -983,6 +1104,9 @@ func (c *clusterCache) sync() error {
|
||||
return fmt.Errorf("failed to sync cluster %s: %w", c.config.Host, err)
|
||||
}
|
||||
|
||||
// Rebuild the parent-to-children index after all resources are loaded
|
||||
c.rebuildParentToChildrenIndex()
|
||||
|
||||
c.log.Info("Cluster successfully synced")
|
||||
return nil
|
||||
}
|
||||
@@ -1055,10 +1179,17 @@ func (c *clusterCache) FindResources(namespace string, predicates ...func(r *Res
|
||||
return result
|
||||
}
|
||||
|
||||
// IterateHierarchy iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree
|
||||
// IterateHierarchyV2 iterates through the hierarchy of resources starting from the given keys.
|
||||
// It efficiently traverses parent-child relationships, including cross-namespace relationships
|
||||
// between cluster-scoped parents and namespaced children, using pre-computed indexes.
|
||||
func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
// Track visited resources to avoid cycles
|
||||
visited := make(map[kube.ResourceKey]int)
|
||||
|
||||
// Group keys by namespace for efficient processing
|
||||
keysPerNamespace := make(map[string][]kube.ResourceKey)
|
||||
for _, key := range keys {
|
||||
_, ok := c.resources[key]
|
||||
@@ -1067,35 +1198,125 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
|
||||
}
|
||||
keysPerNamespace[key.Namespace] = append(keysPerNamespace[key.Namespace], key)
|
||||
}
|
||||
|
||||
// Process namespaced resources with standard hierarchy
|
||||
for namespace, namespaceKeys := range keysPerNamespace {
|
||||
nsNodes := c.nsIndex[namespace]
|
||||
graph := buildGraph(nsNodes)
|
||||
visited := make(map[kube.ResourceKey]int)
|
||||
for _, key := range namespaceKeys {
|
||||
visited[key] = 0
|
||||
c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, visited, action)
|
||||
}
|
||||
|
||||
// Process pre-computed cross-namespace children
|
||||
if clusterKeys, ok := keysPerNamespace[""]; ok {
|
||||
c.processCrossNamespaceChildren(clusterKeys, visited, action)
|
||||
}
|
||||
}
|
||||
|
||||
// processCrossNamespaceChildren processes namespaced children of cluster-scoped resources
|
||||
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries
|
||||
func (c *clusterCache) processCrossNamespaceChildren(
|
||||
clusterScopedKeys []kube.ResourceKey,
|
||||
visited map[kube.ResourceKey]int,
|
||||
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
|
||||
) {
|
||||
for _, clusterKey := range clusterScopedKeys {
|
||||
// Get cluster-scoped resource to access its UID
|
||||
clusterResource := c.resources[clusterKey]
|
||||
if clusterResource == nil {
|
||||
continue
|
||||
}
|
||||
for _, key := range namespaceKeys {
|
||||
// The check for existence of key is done above.
|
||||
res := c.resources[key]
|
||||
if visited[key] == 2 || !action(res, nsNodes) {
|
||||
|
||||
// Use parent-to-children index for O(1) lookup of direct children
|
||||
childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
|
||||
for _, childKey := range childKeys {
|
||||
child := c.resources[childKey]
|
||||
if child == nil || visited[childKey] != 0 {
|
||||
continue
|
||||
}
|
||||
visited[key] = 1
|
||||
if _, ok := graph[key]; ok {
|
||||
for _, child := range graph[key] {
|
||||
if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
|
||||
child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
|
||||
if err != nil {
|
||||
c.log.V(2).Info(err.Error())
|
||||
return false
|
||||
}
|
||||
return action(child, namespaceResources)
|
||||
})
|
||||
}
|
||||
|
||||
// Get namespace nodes for this child
|
||||
nsNodes := c.nsIndex[childKey.Namespace]
|
||||
if nsNodes == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process this child
|
||||
if action(child, nsNodes) {
|
||||
visited[childKey] = 1
|
||||
// Recursively process descendants using index-based traversal
|
||||
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
|
||||
visited[childKey] = 2
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
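The visited map threaded through the traversal code encodes a three-state marker. The commit uses bare integers; spelled out as named constants in a small sketch (these names are an inference, not identifiers defined in the code):

package sketch

// Visit states used by the hierarchy traversal's visited map.
const (
	notVisited   = 0 // resource has not been reached yet
	inProgress   = 1 // resource's callback ran; its descendants are being walked
	fullyVisited = 2 // resource and all of its descendants have been processed
)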
// iterateChildrenUsingIndex recursively processes a resource's children using the parentUIDToChildren index
|
||||
// This replaces graph-based traversal with O(1) index lookups
|
||||
func (c *clusterCache) iterateChildrenUsingIndex(
|
||||
parent *Resource,
|
||||
nsNodes map[kube.ResourceKey]*Resource,
|
||||
visited map[kube.ResourceKey]int,
|
||||
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
|
||||
) {
|
||||
// Look up direct children of this parent using the index
|
||||
childKeys := c.parentUIDToChildren[parent.Ref.UID]
|
||||
for _, childKey := range childKeys {
|
||||
if visited[childKey] != 0 {
|
||||
continue // Already visited or in progress
|
||||
}
|
||||
|
||||
child := c.resources[childKey]
|
||||
if child == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only process children in the same namespace (for within-namespace traversal)
|
||||
// Cross-namespace children are handled by the outer loop in processCrossNamespaceChildren
|
||||
if child.Ref.Namespace != parent.Ref.Namespace {
|
||||
continue
|
||||
}
|
||||
|
||||
if action(child, nsNodes) {
|
||||
visited[childKey] = 1
|
||||
// Recursively process this child's descendants
|
||||
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
|
||||
visited[childKey] = 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processNamespaceHierarchy processes hierarchy for keys within a single namespace
|
||||
func (c *clusterCache) processNamespaceHierarchy(
|
||||
namespaceKeys []kube.ResourceKey,
|
||||
nsNodes map[kube.ResourceKey]*Resource,
|
||||
graph map[kube.ResourceKey]map[types.UID]*Resource,
|
||||
visited map[kube.ResourceKey]int,
|
||||
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
|
||||
) {
|
||||
for _, key := range namespaceKeys {
|
||||
visited[key] = 0
|
||||
}
|
||||
for _, key := range namespaceKeys {
|
||||
res := c.resources[key]
|
||||
if visited[key] == 2 || !action(res, nsNodes) {
|
||||
continue
|
||||
}
|
||||
visited[key] = 1
|
||||
if _, ok := graph[key]; ok {
|
||||
for _, child := range graph[key] {
|
||||
if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
|
||||
child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
|
||||
if err != nil {
|
||||
c.log.V(2).Info(err.Error())
|
||||
return false
|
||||
}
|
||||
return action(child, namespaceResources)
|
||||
})
|
||||
}
|
||||
}
|
||||
visited[key] = 2
|
||||
}
|
||||
visited[key] = 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1106,7 +1327,7 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
|
||||
nodesByUID[node.Ref.UID] = append(nodesByUID[node.Ref.UID], node)
|
||||
}
|
||||
|
||||
// In graph, they key is the parent and the value is a list of children.
|
||||
// In graph, the key is the parent and the value is a list of children.
|
||||
graph := make(map[kube.ResourceKey]map[types.UID]*Resource)
|
||||
|
||||
// Loop through all nodes, calling each one "childNode," because we're only bothering with it if it has a parent.
|
||||
@@ -1132,20 +1353,22 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
|
||||
uidNodes, ok := nodesByUID[ownerRef.UID]
|
||||
if ok {
|
||||
for _, uidNode := range uidNodes {
|
||||
// Cache ResourceKey() to avoid repeated expensive calls
|
||||
uidNodeKey := uidNode.ResourceKey()
|
||||
// Update the graph for this owner to include the child.
|
||||
if _, ok := graph[uidNode.ResourceKey()]; !ok {
|
||||
graph[uidNode.ResourceKey()] = make(map[types.UID]*Resource)
|
||||
if _, ok := graph[uidNodeKey]; !ok {
|
||||
graph[uidNodeKey] = make(map[types.UID]*Resource)
|
||||
}
|
||||
r, ok := graph[uidNode.ResourceKey()][childNode.Ref.UID]
|
||||
r, ok := graph[uidNodeKey][childNode.Ref.UID]
|
||||
if !ok {
|
||||
graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode
|
||||
graph[uidNodeKey][childNode.Ref.UID] = childNode
|
||||
} else if r != nil {
|
||||
// The object might have multiple children with the same UID (e.g. replicaset from apps and extensions group).
|
||||
// It is ok to pick any object, but we need to make sure we pick the same child after every refresh.
|
||||
key1 := r.ResourceKey()
|
||||
key2 := childNode.ResourceKey()
|
||||
if strings.Compare(key1.String(), key2.String()) > 0 {
|
||||
graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode
|
||||
graph[uidNodeKey][childNode.Ref.UID] = childNode
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1365,6 +1588,14 @@ func (c *clusterCache) onNodeRemoved(key kube.ResourceKey) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up parent-to-children index
|
||||
for _, ownerRef := range existing.OwnerRefs {
|
||||
if ownerRef.UID != "" {
|
||||
c.removeFromParentUIDToChildren(ownerRef.UID, key)
|
||||
}
|
||||
}
|
||||
|
||||
for _, h := range c.getResourceUpdatedHandlers() {
|
||||
h(nil, existing, ns)
|
||||
}
|
||||
|
||||
gitops-engine/pkg/cache/cluster_test.go (vendored, 703 changed lines)
@@ -19,8 +19,10 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@@ -91,11 +93,11 @@ func newClusterWithOptions(_ testing.TB, opts []UpdateSettingsFunc, objs ...runt
|
||||
client.PrependReactor("list", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
|
||||
handled, ret, err = reactor.React(action)
|
||||
if err != nil || !handled {
|
||||
return
|
||||
return handled, ret, fmt.Errorf("reactor failed: %w", err)
|
||||
}
|
||||
// make sure list response have resource version
|
||||
ret.(metav1.ListInterface).SetResourceVersion("123")
|
||||
return
|
||||
return handled, ret, nil
|
||||
})
|
||||
|
||||
apiResources := []kube.APIResourceInfo{{
|
||||
@@ -189,6 +191,104 @@ func Benchmark_sync(t *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark_sync_CrossNamespace tests sync performance with cross-namespace relationships.
|
||||
// This measures the one-time cost of building cross-namespace indexes during cache synchronization.
|
||||
func Benchmark_sync_CrossNamespace(b *testing.B) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalNamespaces int
|
||||
resourcesPerNamespace int
|
||||
namespacesWithCrossNS int // Number of namespaces with cross-NS children
|
||||
crossNSResourcesPerNamespace int // Cross-NS children in each affected namespace
|
||||
}{
|
||||
// Baseline
|
||||
{"50NS_0pct_100perNS", 50, 100, 0, 0},
|
||||
|
||||
// Primary dimension: Percentage of namespaces with cross-NS children
|
||||
{"50NS_2pct_100perNS", 50, 100, 1, 10},
|
||||
{"50NS_10pct_100perNS", 50, 100, 5, 10},
|
||||
{"50NS_20pct_100perNS", 50, 100, 10, 10},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b.Run(tc.name, func(b *testing.B) {
|
||||
|
||||
resources := []runtime.Object{}
|
||||
|
||||
// Create cluster-scoped parents (ClusterRoles)
|
||||
numClusterParents := 100
|
||||
clusterUIDs := make(map[string]types.UID)
|
||||
for i := 0; i < numClusterParents; i++ {
|
||||
uid := types.UID(fmt.Sprintf("cluster-uid-%d", i))
|
||||
clusterUIDs[fmt.Sprintf("cluster-role-%d", i)] = uid
|
||||
resources = append(resources, &rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("cluster-role-%d", i),
|
||||
UID: uid,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Create namespaced resources
|
||||
for ns := 0; ns < tc.totalNamespaces; ns++ {
|
||||
namespace := fmt.Sprintf("namespace-%d", ns)
|
||||
hasCrossNS := ns < tc.namespacesWithCrossNS
|
||||
regularPods := tc.resourcesPerNamespace
|
||||
crossNSPods := 0
|
||||
|
||||
if hasCrossNS {
|
||||
regularPods = tc.resourcesPerNamespace - tc.crossNSResourcesPerNamespace
|
||||
crossNSPods = tc.crossNSResourcesPerNamespace
|
||||
}
|
||||
|
||||
// Regular pods without cross-namespace parents
|
||||
for i := 0; i < regularPods; i++ {
|
||||
resources = append(resources, &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("pod-%d", i),
|
||||
Namespace: namespace,
|
||||
UID: types.UID(fmt.Sprintf("pod-uid-%d-%d", ns, i)),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Pods with cross-namespace parents
|
||||
for i := 0; i < crossNSPods; i++ {
|
||||
clusterRoleName := fmt.Sprintf("cluster-role-%d", i%numClusterParents)
|
||||
resources = append(resources, &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("cross-ns-pod-%d", i),
|
||||
Namespace: namespace,
|
||||
UID: types.UID(fmt.Sprintf("cross-ns-pod-uid-%d-%d", ns, i)),
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
Kind: "ClusterRole",
|
||||
Name: clusterRoleName,
|
||||
UID: clusterUIDs[clusterRoleName],
|
||||
}},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Need to add API resources for ClusterRole
|
||||
c := newCluster(b, resources...).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
err := c.sync()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureSynced(t *testing.T) {
|
||||
obj1 := &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -302,12 +402,16 @@ func TestStatefulSetOwnershipInferred(t *testing.T) {
|
||||
tc.cluster.lock.Lock()
|
||||
defer tc.cluster.lock.Unlock()
|
||||
|
||||
refs := tc.cluster.resources[kube.GetResourceKey(pvc)].OwnerRefs
|
||||
resource := tc.cluster.resources[kube.GetResourceKey(pvc)]
|
||||
if resource == nil {
|
||||
return false // Resource not ready yet, keep retrying
|
||||
}
|
||||
refs := resource.OwnerRefs
|
||||
if tc.expectNoOwner {
|
||||
return len(refs) == 0
|
||||
}
|
||||
return assert.ElementsMatch(t, refs, tc.expectedRefs)
|
||||
}, 5*time.Second, 10*time.Millisecond, "Expected PVC to have correct owner reference")
|
||||
}, 5*time.Second, 20*time.Millisecond, "Expected PVC to have correct owner reference")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1045,7 +1149,7 @@ func testDeploy() *appsv1.Deployment {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIterateHierachyV2(t *testing.T) {
|
||||
func TestIterateHierarchyV2(t *testing.T) {
|
||||
cluster := newCluster(t, testPod1(), testPod2(), testRS(), testExtensionsRS(), testDeploy())
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
@@ -1157,6 +1261,307 @@ func TestIterateHierachyV2(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func testClusterParent() *corev1.Namespace {
|
||||
return &corev1.Namespace{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-cluster-parent",
|
||||
UID: "cluster-parent-123",
|
||||
ResourceVersion: "123",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testNamespacedChild() *corev1.Pod {
|
||||
return &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "namespaced-child",
|
||||
Namespace: "test-namespace",
|
||||
UID: "namespaced-child-456",
|
||||
ResourceVersion: "123",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "test-cluster-parent",
|
||||
UID: "cluster-parent-123",
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testClusterChild() *rbacv1.ClusterRole {
|
||||
return &rbacv1.ClusterRole{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "rbac.authorization.k8s.io/v1",
|
||||
Kind: "ClusterRole",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "cluster-child",
|
||||
UID: "cluster-child-789",
|
||||
ResourceVersion: "123",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "test-cluster-parent",
|
||||
UID: "cluster-parent-123",
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func TestIterateHierarchyV2_ClusterScopedParent_FindsAllChildren(t *testing.T) {
|
||||
// Test that cluster-scoped parents automatically find all their children (both cluster-scoped and namespaced)
|
||||
// This is the core behavior of the new implementation - cross-namespace relationships are always tracked
|
||||
cluster := newCluster(t, testClusterParent(), testNamespacedChild(), testClusterChild()).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}, {
|
||||
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
keys := []kube.ResourceKey{}
|
||||
cluster.IterateHierarchyV2(
|
||||
[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testClusterParent()))},
|
||||
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
keys = append(keys, resource.ResourceKey())
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// Should find the parent and all its children (both cluster-scoped and namespaced)
|
||||
expected := []kube.ResourceKey{
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterParent())),
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterChild())),
|
||||
kube.GetResourceKey(mustToUnstructured(testNamespacedChild())),
|
||||
}
|
||||
assert.ElementsMatch(t, expected, keys)
|
||||
}
|
||||
|
||||
|
||||
func TestIterateHierarchyV2_ClusterScopedParentOnly_InferredUID(t *testing.T) {
|
||||
// Test that passing only a cluster-scoped parent finds children even with inferred UIDs.
|
||||
// This should never happen but we coded defensively for this case, and at worst it would link a child
|
||||
// to the wrong parent if there were multiple parents with the same name (i.e. deleted and recreated).
|
||||
namespacedChildNoUID := &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "namespaced-child-no-uid",
|
||||
Namespace: "test-namespace",
|
||||
UID: "namespaced-child-789",
|
||||
ResourceVersion: "123",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "test-cluster-parent",
|
||||
// Note: No UID here - will need to be inferred
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
cluster := newCluster(t, testClusterParent(), namespacedChildNoUID, testClusterChild()).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}, {
|
||||
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
keys := []kube.ResourceKey{}
|
||||
// Test with all namespaces - need to pass both cluster parent and namespaced children
|
||||
// as explicit keys to find them all
|
||||
cluster.IterateHierarchyV2(
|
||||
[]kube.ResourceKey{
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterParent())),
|
||||
kube.GetResourceKey(mustToUnstructured(namespacedChildNoUID)),
|
||||
},
|
||||
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
keys = append(keys, resource.ResourceKey())
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// Should find the parent and all its children, even with inferred UID
|
||||
assert.ElementsMatch(t, []kube.ResourceKey{
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterParent())),
|
||||
kube.GetResourceKey(mustToUnstructured(namespacedChildNoUID)),
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterChild())),
|
||||
}, keys)
|
||||
}
|
||||
|
||||
func TestOrphanedChildrenCleanup(t *testing.T) {
|
||||
// Test that parent-to-children index is properly cleaned up when resources are deleted
|
||||
clusterParent := testClusterParent()
|
||||
namespacedChild := testNamespacedChild()
|
||||
|
||||
cluster := newCluster(t, clusterParent, namespacedChild).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify child is tracked in parentUIDToChildren index
|
||||
cluster.lock.RLock()
|
||||
childKey := kube.GetResourceKey(mustToUnstructured(namespacedChild))
|
||||
parentUID := clusterParent.GetUID()
|
||||
|
||||
// Check that the child is in parentUIDToChildren
|
||||
children, ok := cluster.parentUIDToChildren[parentUID]
|
||||
require.True(t, ok, "parent should have entry in parentUIDToChildren")
|
||||
require.Contains(t, children, childKey, "child should be in parent's children list")
|
||||
cluster.lock.RUnlock()
|
||||
|
||||
// Delete the child
|
||||
cluster.lock.Lock()
|
||||
cluster.onNodeRemoved(childKey)
|
||||
cluster.lock.Unlock()
|
||||
|
||||
// Verify cleanup: child removed from parentUIDToChildren
|
||||
cluster.lock.RLock()
|
||||
children, ok = cluster.parentUIDToChildren[parentUID]
|
||||
if ok {
|
||||
assert.NotContains(t, children, childKey, "child should be removed from parent's children list")
|
||||
}
|
||||
cluster.lock.RUnlock()
|
||||
|
||||
// Re-add the child and verify it re-populates correctly
|
||||
cluster.lock.Lock()
|
||||
cluster.setNode(cluster.newResource(mustToUnstructured(namespacedChild)))
|
||||
cluster.lock.Unlock()
|
||||
|
||||
cluster.lock.RLock()
|
||||
children, ok = cluster.parentUIDToChildren[parentUID]
|
||||
require.True(t, ok, "parent should be back in parentUIDToChildren")
|
||||
require.Contains(t, children, childKey, "child should be back in parent's children list")
|
||||
cluster.lock.RUnlock()
|
||||
}
|
||||
|
||||
func TestOrphanedChildrenIndex_OwnerRefLifecycle(t *testing.T) {
|
||||
// Test realistic scenarios of owner references being added and removed
|
||||
clusterParent := testClusterParent()
|
||||
|
||||
// Start with a child that has NO owner reference
|
||||
childNoOwner := &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "child-no-owner",
|
||||
Namespace: "test-namespace",
|
||||
UID: "child-uid-123",
|
||||
ResourceVersion: "1",
|
||||
// No OwnerReferences
|
||||
},
|
||||
}
|
||||
|
||||
cluster := newCluster(t, clusterParent, childNoOwner).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
childKey := kube.GetResourceKey(mustToUnstructured(childNoOwner))
|
||||
parentUID := clusterParent.GetUID()
|
||||
|
||||
// Verify child is NOT tracked initially (no owner ref)
|
||||
cluster.lock.RLock()
|
||||
children, ok := cluster.parentUIDToChildren[parentUID]
|
||||
if ok {
|
||||
assert.NotContains(t, children, childKey, "child without owner ref should not be in parentUIDToChildren")
|
||||
}
|
||||
cluster.lock.RUnlock()
|
||||
|
||||
// Simulate controller adding owner reference (e.g., adoption)
|
||||
childWithOwner := &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "child-no-owner",
|
||||
Namespace: "test-namespace",
|
||||
UID: "child-uid-123",
|
||||
ResourceVersion: "2",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "v1",
|
||||
Kind: "Namespace",
|
||||
Name: "test-cluster-parent",
|
||||
UID: "cluster-parent-123",
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
cluster.lock.Lock()
|
||||
cluster.setNode(cluster.newResource(mustToUnstructured(childWithOwner)))
|
||||
cluster.lock.Unlock()
|
||||
|
||||
// Verify child is NOW tracked (owner ref added)
|
||||
cluster.lock.RLock()
|
||||
children, ok = cluster.parentUIDToChildren[parentUID]
|
||||
require.True(t, ok, "parent should have entry in parentUIDToChildren after adding owner ref")
|
||||
require.Contains(t, children, childKey, "child should be in parent's children list after adding owner ref")
|
||||
cluster.lock.RUnlock()
|
||||
|
||||
// Simulate removing owner reference (e.g., parent deletion with orphanDependents: true)
|
||||
childWithoutOwnerAgain := &corev1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "child-no-owner",
|
||||
Namespace: "test-namespace",
|
||||
UID: "child-uid-123",
|
||||
ResourceVersion: "3",
|
||||
// OwnerReferences removed
|
||||
},
|
||||
}
|
||||
|
||||
cluster.lock.Lock()
|
||||
cluster.setNode(cluster.newResource(mustToUnstructured(childWithoutOwnerAgain)))
|
||||
cluster.lock.Unlock()
|
||||
|
||||
// Verify child is NO LONGER tracked (owner ref removed)
|
||||
cluster.lock.RLock()
|
||||
children, ok = cluster.parentUIDToChildren[parentUID]
|
||||
if ok {
|
||||
assert.NotContains(t, children, childKey, "child should be removed from parentUIDToChildren after removing owner ref")
|
||||
}
|
||||
cluster.lock.RUnlock()
|
||||
|
||||
// Verify empty entry cleanup: parent entry should be cleaned up if it has no children
|
||||
cluster.lock.RLock()
|
||||
children, ok = cluster.parentUIDToChildren[parentUID]
|
||||
if ok {
|
||||
assert.Empty(t, children, "parent's children list should be empty or cleaned up")
|
||||
}
|
||||
cluster.lock.RUnlock()
|
||||
}
|
||||
|
||||
// Test_watchEvents_Deadlock validates that starting watches will not create a deadlock
|
||||
// caused by using improper locking in various callback methods when there is a high load on the
|
||||
// system.
|
||||
@@ -1292,3 +1697,291 @@ func BenchmarkIterateHierarchyV2(b *testing.B) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// buildClusterParentTestResourceMap creates test resources with configurable namespace distribution.
|
||||
// Parameters:
|
||||
// - clusterParents: number of cluster-scoped parent resources (ClusterRoles)
|
||||
// - totalNamespaces: total number of namespaces to create
|
||||
// - namespacesWithCrossNS: how many of those namespaces contain cross-namespace children
|
||||
// - resourcesPerNamespace: resources in each namespace
|
||||
// - crossNSResourcesPerNamespace: how many cross-namespace children in each affected namespace
|
||||
func buildClusterParentTestResourceMap(
|
||||
clusterParents, totalNamespaces, namespacesWithCrossNS, resourcesPerNamespace, crossNSResourcesPerNamespace int,
|
||||
) map[kube.ResourceKey]*Resource {
|
||||
resources := make(map[kube.ResourceKey]*Resource)
|
||||
|
||||
// Create cluster-scoped parents (ClusterRoles)
|
||||
clusterParentUIDs := make(map[string]string)
|
||||
for i := 0; i < clusterParents; i++ {
|
||||
clusterRoleName := fmt.Sprintf("cluster-role-%d", i)
|
||||
uid := uuid.New().String()
|
||||
clusterParentUIDs[clusterRoleName] = uid
|
||||
|
||||
key := kube.ResourceKey{
|
||||
Group: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Namespace: "",
|
||||
Name: clusterRoleName,
|
||||
}
|
||||
|
||||
resourceYaml := fmt.Sprintf(`
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: %s
|
||||
uid: %s
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list"]`, clusterRoleName, uid)
|
||||
|
||||
resources[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
|
||||
}
|
||||
|
||||
// Generate namespace names
|
||||
namespaces := make([]string, totalNamespaces)
|
||||
for i := 0; i < totalNamespaces; i++ {
|
||||
namespaces[i] = fmt.Sprintf("ns-%d", i)
|
||||
}
|
||||
|
||||
// For each namespace
|
||||
for nsIdx, namespace := range namespaces {
|
||||
hasCrossNS := nsIdx < namespacesWithCrossNS
|
||||
regularPodsInNS := resourcesPerNamespace
|
||||
crossNSPodsInNS := 0
|
||||
|
||||
if hasCrossNS {
|
||||
regularPodsInNS = resourcesPerNamespace - crossNSResourcesPerNamespace
|
||||
crossNSPodsInNS = crossNSResourcesPerNamespace
|
||||
}
|
||||
|
||||
// Create regular namespaced resources (Pods)
|
||||
for i := 0; i < regularPodsInNS; i++ {
|
||||
name := fmt.Sprintf("pod-%s-%d", namespace, i)
|
||||
uid := uuid.New().String()
|
||||
|
||||
key := kube.ResourceKey{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
Kind: "Pod",
|
||||
}
|
||||
|
||||
resourceYaml := fmt.Sprintf(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
namespace: %s
|
||||
name: %s
|
||||
uid: %s`, namespace, name, uid)
|
||||
|
||||
resources[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
|
||||
}
|
||||
|
||||
// Create cross-namespace children if this namespace has them
|
||||
for i := 0; i < crossNSPodsInNS; i++ {
|
||||
podName := fmt.Sprintf("cross-ns-pod-%s-%d", namespace, i)
|
||||
clusterRoleIndex := i % clusterParents
|
||||
clusterRoleName := fmt.Sprintf("cluster-role-%d", clusterRoleIndex)
|
||||
parentUID := clusterParentUIDs[clusterRoleName]
|
||||
uid := uuid.New().String()
|
||||
|
||||
key := kube.ResourceKey{
|
||||
Namespace: namespace,
|
||||
Name: podName,
|
||||
Kind: "Pod",
|
||||
}
|
||||
|
||||
resourceYaml := fmt.Sprintf(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: %s
|
||||
namespace: %s
|
||||
uid: %s
|
||||
ownerReferences:
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
name: %s
|
||||
uid: %s`, podName, namespace, uid, clusterRoleName, parentUID)
|
||||
|
||||
resources[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
|
||||
}
|
||||
}
|
||||
|
||||
return resources
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// BenchmarkIterateHierarchyV2_ClusterParentTraversal benchmarks full hierarchy traversal
|
||||
// starting from cluster-scoped parents with varying percentages of namespaces containing
|
||||
// cross-namespace children. This tests the actual performance impact of the cross-namespace
|
||||
// relationship tracking feature.
|
||||
func BenchmarkIterateHierarchyV2_ClusterParentTraversal(b *testing.B) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalNamespaces int
|
||||
resourcesPerNamespace int
|
||||
namespacesWithCrossNS int // Number of namespaces with cross-NS children
|
||||
crossNSResourcesPerNamespace int // Cross-NS children in each affected namespace
|
||||
}{
|
||||
// Baseline: 0% of namespaces have cross-NS children
|
||||
{"50NS_0pct_100perNS", 50, 100, 0, 0},
|
||||
|
||||
// Primary dimension: Percentage of namespaces with cross-NS children
|
||||
// 5,000 total resources (50 NS × 100 resources/NS), 10 cross-NS children per affected namespace
|
||||
{"50NS_2pct_100perNS_10cross", 50, 100, 1, 10}, // 2% of namespaces (1/50)
|
||||
{"50NS_4pct_100perNS_10cross", 50, 100, 2, 10}, // 4% of namespaces (2/50)
|
||||
{"50NS_10pct_100perNS_10cross", 50, 100, 5, 10}, // 10% of namespaces (5/50)
|
||||
{"50NS_20pct_100perNS_10cross", 50, 100, 10, 10}, // 20% of namespaces (10/50)
|
||||
|
||||
// Secondary dimension: Within a namespace, % of resources that are cross-NS
|
||||
// 5,000 total resources, 2% of namespaces (1/50) have cross-NS children
|
||||
{"50NS_2pct_100perNS_10cross", 50, 100, 1, 10}, // 10% of namespace resources (10/100)
|
||||
{"50NS_2pct_100perNS_25cross", 50, 100, 1, 25}, // 25% of namespace resources (25/100)
|
||||
{"50NS_2pct_100perNS_50cross", 50, 100, 1, 50}, // 50% of namespace resources (50/100)
|
||||
|
||||
// Edge cases
|
||||
{"100NS_1pct_100perNS_10cross", 100, 100, 1, 10}, // 1% of namespaces (1/100) - extreme clustering
|
||||
{"50NS_100pct_100perNS_10cross", 50, 100, 50, 10}, // 100% of namespaces - worst case
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b.Run(tc.name, func(b *testing.B) {
|
||||
|
||||
cluster := newCluster(b).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}, {
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Pod"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
|
||||
Meta: metav1.APIResource{Namespaced: true},
|
||||
}})
|
||||
|
||||
// CRITICAL: Initialize namespacedResources so setNode will populate the parentUIDToChildren index
|
||||
cluster.namespacedResources = map[schema.GroupKind]bool{
|
||||
{Group: "", Kind: "Pod"}: true,
|
||||
{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: false,
|
||||
}
|
||||
|
||||
clusterParents := 100 // Fixed number of cluster-scoped resources
|
||||
|
||||
testResources := buildClusterParentTestResourceMap(
|
||||
clusterParents,
|
||||
tc.totalNamespaces,
|
||||
tc.namespacesWithCrossNS,
|
||||
tc.resourcesPerNamespace,
|
||||
tc.crossNSResourcesPerNamespace,
|
||||
)
|
||||
|
||||
// Add resources to cache - this will populate the parentUIDToChildren index
|
||||
for _, resource := range testResources {
|
||||
cluster.setNode(resource)
|
||||
}
|
||||
|
||||
// Verify indexes are populated (sanity check)
|
||||
if tc.namespacesWithCrossNS > 0 {
|
||||
if len(cluster.parentUIDToChildren) == 0 {
|
||||
b.Fatal("parentUIDToChildren index not populated - benchmark setup is broken")
|
||||
}
|
||||
}
|
||||
|
||||
// Always start from a cluster-scoped parent to test cross-namespace traversal
|
||||
startKey := kube.ResourceKey{
|
||||
Group: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Namespace: "",
|
||||
Name: "cluster-role-0",
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
cluster.IterateHierarchyV2([]kube.ResourceKey{startKey}, func(_ *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func TestIterateHierarchyV2_NoDuplicatesInSameNamespace(t *testing.T) {
|
||||
// Create a parent-child relationship in the same namespace
|
||||
parent := &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "parent", Namespace: "default", UID: "parent-uid",
|
||||
},
|
||||
}
|
||||
child := &appsv1.ReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "ReplicaSet"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "child", Namespace: "default", UID: "child-uid",
|
||||
OwnerReferences: []metav1.OwnerReference{{
|
||||
APIVersion: "apps/v1", Kind: "Deployment", Name: "parent", UID: "parent-uid",
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
cluster := newCluster(t, parent, child)
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
visitCount := make(map[string]int)
|
||||
cluster.IterateHierarchyV2(
|
||||
[]kube.ResourceKey{
|
||||
kube.GetResourceKey(mustToUnstructured(parent)),
|
||||
kube.GetResourceKey(mustToUnstructured(child)),
|
||||
},
|
||||
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
visitCount[resource.Ref.Name]++
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// Each resource should be visited exactly once
|
||||
assert.Equal(t, 1, visitCount["parent"], "parent should be visited once")
|
||||
assert.Equal(t, 1, visitCount["child"], "child should be visited once")
|
||||
}
|
||||
|
||||
func TestIterateHierarchyV2_NoDuplicatesCrossNamespace(t *testing.T) {
|
||||
// Test that cross-namespace parent-child relationships don't cause duplicates
|
||||
visitCount := make(map[string]int)
|
||||
|
||||
cluster := newCluster(t, testClusterParent(), testNamespacedChild(), testClusterChild()).WithAPIResources([]kube.APIResourceInfo{{
|
||||
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}, {
|
||||
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
|
||||
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
|
||||
Meta: metav1.APIResource{Namespaced: false},
|
||||
}})
|
||||
err := cluster.EnsureSynced()
|
||||
require.NoError(t, err)
|
||||
|
||||
cluster.IterateHierarchyV2(
|
||||
[]kube.ResourceKey{
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterParent())),
|
||||
kube.GetResourceKey(mustToUnstructured(testNamespacedChild())),
|
||||
kube.GetResourceKey(mustToUnstructured(testClusterChild())),
|
||||
},
|
||||
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
|
||||
visitCount[resource.Ref.Name]++
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// Each resource should be visited exactly once, even with cross-namespace relationships
|
||||
assert.Equal(t, 1, visitCount["test-cluster-parent"], "cluster parent should be visited once")
|
||||
assert.Equal(t, 1, visitCount["namespaced-child"], "namespaced child should be visited once")
|
||||
assert.Equal(t, 1, visitCount["cluster-child"], "cluster child should be visited once")
|
||||
}
|
||||
|
||||
gitops-engine/pkg/cache/mocks/ClusterCache.go (generated, vendored, 2 changed lines)
@@ -563,7 +563,7 @@ func (_c *ClusterCache_IsNamespaced_Call) RunAndReturn(run func(gk schema.GroupK
    return _c
}

// IterateHierarchyV2 provides a mock function for the type ClusterCache
// IterateHierarchyV2 provides a mock function with given fields: keys, action, orphanedResourceNamespace
func (_mock *ClusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *cache.Resource, namespaceResources map[kube.ResourceKey]*cache.Resource) bool) {
    _mock.Called(keys, action)
    return

6
gitops-engine/pkg/cache/resource.go
vendored
6
gitops-engine/pkg/cache/resource.go
vendored
@@ -91,9 +91,9 @@ func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*R
    if !ok || children == nil {
        return
    }
    for _, c := range children {
        childKey := c.ResourceKey()
        child := ns[childKey]
    for _, child := range children {
        childKey := child.ResourceKey()
        // For cross-namespace relationships, child might not be in ns, so use it directly from graph
        switch visited[childKey] {
        case 1:
            // Since we encountered a node that we're currently processing, we know we have a circular dependency.

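The comment above captures the key change in this hunk: children are taken directly from the UID-keyed graph rather than being re-resolved through the per-namespace map, so a child that lives in a different namespace than its parent (or has no namespace at all) is still reachable. A simplified, illustrative sketch of that traversal pattern, not the exact gitops-engine implementation:

// Illustrative sketch only: walk children straight from the graph
// (parent ResourceKey -> child UID -> child resource), using a visited map
// to avoid reprocessing nodes and to break circular ownership chains.
// Assumed: kube.ResourceKey from gitops-engine's kube package and types.UID
// from k8s.io/apimachinery/pkg/types.
func walkChildren(
    graph map[kube.ResourceKey]map[types.UID]*Resource,
    parent *Resource,
    visited map[kube.ResourceKey]int,
    action func(child *Resource) bool,
) {
    children, ok := graph[parent.ResourceKey()]
    if !ok || children == nil {
        return
    }
    for _, child := range children {
        childKey := child.ResourceKey()
        if visited[childKey] != 0 {
            // Either already on the current path (circular dependency) or already processed.
            continue
        }
        visited[childKey] = 1 // in progress
        if action(child) {
            walkChildren(graph, child, visited, action)
        }
        visited[childKey] = 2 // done
    }
}
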
259
test/e2e/cross_namespace_ownership_test.go
Normal file
259
test/e2e/cross_namespace_ownership_test.go
Normal file
@@ -0,0 +1,259 @@
package e2e

import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    applicationpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
    clusterpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster"
    "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    . "github.com/argoproj/argo-cd/v3/test/e2e/fixture"
    . "github.com/argoproj/argo-cd/v3/test/e2e/fixture/app"
    "github.com/argoproj/argo-cd/v3/util/io"
)

// TestCrossNamespaceOwnership tests that Argo CD correctly tracks parent-child relationships
// when a cluster-scoped resource (ClusterRole) owns namespaced resources (Roles) across different namespaces.
// This validates the fix for supporting cluster-scoped parents with namespaced children in resource trees.
func TestCrossNamespaceOwnership(t *testing.T) {
    var clusterRoleUID string

    Given(t).
        Path("cross-namespace-ownership").
        When().
        CreateApp().
        Sync().
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced)).
        And(func(_ *v1alpha1.Application) {
            // Get the UID of the ClusterRole that was created
            output, err := Run("", "kubectl", "get", "clusterrole", "test-cluster-role",
                "-o", "jsonpath={.metadata.uid}")
            require.NoError(t, err)
            clusterRoleUID = output
            t.Logf("ClusterRole UID: %s", clusterRoleUID)
        }).
        When().
        And(func() {
            // Create a Role in the app's destination namespace with an ownerReference to the ClusterRole
            roleYaml := fmt.Sprintf(`apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: test-role-same-ns
  namespace: %s
  ownerReferences:
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    name: test-cluster-role
    uid: %s
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list"]`, DeploymentNamespace(), clusterRoleUID)

            _, err := Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml))
            require.NoError(t, err)
            t.Logf("Created Role in app namespace: %s", DeploymentNamespace())

            // Create another namespace for cross-namespace testing
            otherNamespace := DeploymentNamespace() + "-other"
            _, err = Run("", "kubectl", "create", "namespace", otherNamespace)
            if err != nil {
                // Namespace might already exist, that's ok
                t.Logf("Namespace %s may already exist: %v", otherNamespace, err)
            }

            // Create a Role in a different namespace with an ownerReference to the ClusterRole
            roleYaml2 := fmt.Sprintf(`apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: test-role-other-ns
  namespace: %s
  ownerReferences:
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    name: test-cluster-role
    uid: %s
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list"]`, otherNamespace, clusterRoleUID)

            _, err = Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml2))
            require.NoError(t, err)
            t.Logf("Created Role in other namespace: %s", otherNamespace)

            // Give the cache a moment to pick up the changes
            time.Sleep(2 * time.Second)

            // Invalidate the cluster cache to force rebuild of orphaned children index
            t.Log("Invalidating cluster cache to rebuild orphaned children index...")
            closer, clusterClient, err := ArgoCDClientset.NewClusterClient()
            require.NoError(t, err)
            defer io.Close(closer)

            // Invalidate cache for the default cluster (https://kubernetes.default.svc)
            cluster, err := clusterClient.InvalidateCache(context.Background(), &clusterpkg.ClusterQuery{
                Server: "https://kubernetes.default.svc",
            })
            if err != nil {
                t.Logf("Warning: Failed to invalidate cache: %v", err)
            } else {
                t.Logf("Cache invalidated successfully, cluster status: %s", cluster.Info.ConnectionState.Status)
            }

            // Wait for cache to rebuild
            time.Sleep(3 * time.Second)
        }).
        Refresh(v1alpha1.RefreshTypeHard). // Now refresh to get the updated resource tree
        Then().
        And(func(app *v1alpha1.Application) {
            // Now check the resource tree to verify both Roles show up as children of the ClusterRole
            closer, cdClient := ArgoCDClientset.NewApplicationClientOrDie()
            defer io.Close(closer)

            tree, err := cdClient.ResourceTree(context.Background(), &applicationpkg.ResourcesQuery{
                ApplicationName: &app.Name,
                AppNamespace:    &app.Namespace,
            })
            require.NoError(t, err)
            require.NotNil(t, tree)

            // Find the ClusterRole in the tree
            var clusterRoleNode *v1alpha1.ResourceNode
            for _, node := range tree.Nodes {
                if node.Kind == "ClusterRole" && node.Name == "test-cluster-role" {
                    clusterRoleNode = &node
                    break
                }
            }
            require.NotNil(t, clusterRoleNode, "ClusterRole not found in resource tree")
            t.Logf("Found ClusterRole in tree: %s, namespace: '%s'", clusterRoleNode.Name, clusterRoleNode.Namespace)

            // Find both Roles and verify they reference the ClusterRole as their parent
            var roleSameNs, roleOtherNs *v1alpha1.ResourceNode
            for _, node := range tree.Nodes {
                if node.Kind == "Role" {
                    t.Logf("Found Role: %s in namespace '%s' with parent refs: %v",
                        node.Name, node.Namespace, node.ParentRefs)

                    // Log parent namespace values
                    for _, parent := range node.ParentRefs {
                        t.Logf("Parent ref: Kind=%s, Name=%s, Namespace='%s'",
                            parent.Kind, parent.Name, parent.Namespace)
                    }

                    switch node.Name {
                    case "test-role-same-ns":
                        roleSameNs = &node
                    case "test-role-other-ns":
                        roleOtherNs = &node
                    }
                }
            }

            // Verify both roles were found
            require.NotNil(t, roleSameNs, "Role in same namespace not found in resource tree")
            require.NotNil(t, roleOtherNs, "Role in other namespace not found in resource tree")

            // Verify both roles have the ClusterRole as their parent
            assert.Len(t, roleSameNs.ParentRefs, 1, "Role in same namespace should have one parent")
            assert.Equal(t, "ClusterRole", roleSameNs.ParentRefs[0].Kind)
            assert.Equal(t, "test-cluster-role", roleSameNs.ParentRefs[0].Name)
            assert.Equal(t, string(clusterRoleUID), roleSameNs.ParentRefs[0].UID)

            assert.Len(t, roleOtherNs.ParentRefs, 1, "Role in other namespace should have one parent")
            assert.Equal(t, "ClusterRole", roleOtherNs.ParentRefs[0].Kind)
            assert.Equal(t, "test-cluster-role", roleOtherNs.ParentRefs[0].Name)
            assert.Equal(t, string(clusterRoleUID), roleOtherNs.ParentRefs[0].UID)

            t.Log("✓ Both Roles correctly show ClusterRole as their parent in the resource tree")
        }).
        When().
        Delete(true).
        Then().
        Expect(DoesNotExist())
}

// TestCrossNamespaceOwnershipWithRefresh tests that cross-namespace relationships are maintained
// after a cluster cache refresh/invalidation
func TestCrossNamespaceOwnershipWithRefresh(t *testing.T) {
    var clusterRoleUID string

    Given(t).
        Path("cross-namespace-ownership").
        When().
        CreateApp().
        Sync().
        Then().
        Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced)).
        And(func(_ *v1alpha1.Application) {
            // Get the UID of the ClusterRole
            output, err := Run("", "kubectl", "get", "clusterrole", "test-cluster-role",
                "-o", "jsonpath={.metadata.uid}")
            require.NoError(t, err)
            clusterRoleUID = output
        }).
        When().
        And(func() {
            // Create a Role with an ownerReference to the ClusterRole
            roleYaml := fmt.Sprintf(`apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: test-role-refresh
  namespace: %s
  ownerReferences:
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    name: test-cluster-role
    uid: %s
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list"]`, DeploymentNamespace(), clusterRoleUID)

            _, err := Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml))
            require.NoError(t, err)

            // Give the cache a moment to pick up the changes
            time.Sleep(2 * time.Second)
        }).
        Refresh(v1alpha1.RefreshTypeHard). // Force a hard refresh to invalidate the cache
        Then().
        And(func(app *v1alpha1.Application) {
            // Verify the relationship is still tracked after refresh
            closer, cdClient := ArgoCDClientset.NewApplicationClientOrDie()
            defer io.Close(closer)

            tree, err := cdClient.ResourceTree(context.Background(), &applicationpkg.ResourcesQuery{
                ApplicationName: &app.Name,
                AppNamespace:    &app.Namespace,
            })
            require.NoError(t, err)

            // Find the Role and verify it still has the ClusterRole as parent
            var roleNode *v1alpha1.ResourceNode
            for _, node := range tree.Nodes {
                if node.Kind == "Role" && node.Name == "test-role-refresh" {
                    roleNode = &node
                    break
                }
            }

            require.NotNil(t, roleNode, "Role not found in resource tree after refresh")
            assert.Len(t, roleNode.ParentRefs, 1, "Role should have one parent after refresh")
            assert.Equal(t, "ClusterRole", roleNode.ParentRefs[0].Kind)
            assert.Equal(t, "test-cluster-role", roleNode.ParentRefs[0].Name)

            t.Log("✓ Cross-namespace relationship maintained after cache refresh")
        }).
        When().
        Delete(true).
        Then().
        Expect(DoesNotExist())
}

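The e2e tests above establish the parent link by applying YAML with kubectl; the uid in the ownerReference must match the live ClusterRole, which is why the tests read it back first. For reference, the same link can be expressed with client-go; a hedged sketch (not part of this change), assuming an already-configured kubernetes.Interface client:

// Sketch only (not part of this change). Assumed imports:
//   rbacv1 "k8s.io/api/rbac/v1"
//   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//   "k8s.io/apimachinery/pkg/types"
//   "k8s.io/client-go/kubernetes"
func createOwnedRole(ctx context.Context, client kubernetes.Interface, namespace string, ownerUID types.UID) error {
    // Namespaced Role whose ownerReference points at the cluster-scoped ClusterRole,
    // mirroring what the e2e test applies via kubectl.
    role := &rbacv1.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "test-role-same-ns",
            Namespace: namespace,
            OwnerReferences: []metav1.OwnerReference{{
                APIVersion: "rbac.authorization.k8s.io/v1",
                Kind:       "ClusterRole",
                Name:       "test-cluster-role",
                UID:        ownerUID, // must match the UID of the live ClusterRole
            }},
        },
        Rules: []rbacv1.PolicyRule{{
            APIGroups: []string{""},
            Resources: []string{"configmaps"},
            Verbs:     []string{"get", "list"},
        }},
    }
    _, err := client.RbacV1().Roles(namespace).Create(ctx, role, metav1.CreateOptions{})
    return err
}

Kubernetes permits a namespaced dependent to name a cluster-scoped owner (while cross-namespace owner references between namespaced objects are disallowed), which is exactly the shape these tests and the resource-tree change exercise.
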
10
test/e2e/testdata/cross-namespace-ownership/cluster-role.yaml
vendored
Normal file
10
test/e2e/testdata/cross-namespace-ownership/cluster-role.yaml
vendored
Normal file
@@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: test-cluster-role
  labels:
    app.kubernetes.io/name: test-cluster-role
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list"]

@@ -1085,7 +1085,8 @@ export const ApplicationResourceTree = (props: ApplicationResourceTreeProps) =>
        graph.setNode(treeNodeKey(root), {...root, width: NODE_WIDTH, height: NODE_HEIGHT, root});
    }
    (childrenByParentKey.get(treeNodeKey(root)) || []).forEach(child => {
        if (root.namespace === child.namespace) {
        // Draw edge if nodes are in same namespace OR if parent is cluster-scoped (no namespace)
        if (root.namespace === child.namespace || !root.namespace) {
            graph.setEdge(treeNodeKey(root), treeNodeKey(child), {colors: [colorByService.get(treeNodeKey(child))]});
        }
    });

@@ -1191,7 +1192,9 @@ export const ApplicationResourceTree = (props: ApplicationResourceTreeProps) =>
    if (treeNodeKey(child) === treeNodeKey(root)) {
        return;
    }
    if (node.namespace === child.namespace) {
    // Draw edge if nodes are in same namespace OR if parent is cluster-scoped (empty/undefined namespace)
    const isParentClusterScoped = !node.namespace || node.namespace === '';
    if (node.namespace === child.namespace || isParentClusterScoped) {
        graph.setEdge(treeNodeKey(node), treeNodeKey(child), {colors});
    }
    processNode(child, root, colors);