feat: cross namespace hierarchy traversal from cluster-scoped parents to namespaced children (fixes #24379) (#24847)
Signed-off-by: Jonathan Ogilvie <jonathan.ogilvie@sumologic.com>
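The feature is consumed through the existing IterateHierarchyV2 entry point. The sketch below is a minimal, hypothetical usage example and is not part of this change: the kubeconfig handling, the example.io/MyClusterCR group and kind, and the resource name are placeholders. It illustrates that a traversal started from a cluster-scoped parent now also reaches the namespaced children that point at it via owner references.

package main

import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/cache"
	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical setup: build a cluster cache from the local kubeconfig.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clusterCache := cache.NewClusterCache(config)
	if err := clusterCache.EnsureSynced(); err != nil {
		panic(err)
	}

	// Start from a cluster-scoped parent; namespace is empty for cluster-scoped resources.
	// The group, kind, and name below are invented for illustration.
	parent := kube.NewResourceKey("example.io", "MyClusterCR", "", "my-cluster-cr")

	// With this change the callback also receives namespaced children owned by the
	// cluster-scoped parent, not only resources that share its (empty) namespace.
	clusterCache.IterateHierarchyV2([]kube.ResourceKey{parent}, func(r *cache.Resource, _ map[kube.ResourceKey]*cache.Resource) bool {
		fmt.Println(r.ResourceKey().String())
		return true
	})
}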
gitops-engine/pkg/cache/cluster.go (vendored) | 291
@@ -1,3 +1,27 @@
// Package cache provides a caching layer for Kubernetes cluster resources with support for
// hierarchical parent-child relationships, including cross-namespace relationships between
// cluster-scoped parents and namespaced children.
//
// The cache maintains:
// - A complete index of all monitored resources in the cluster
// - Hierarchical relationships between resources via owner references
// - Cross-namespace relationships from cluster-scoped resources to namespaced children
// - Efficient traversal of resource hierarchies for dependency analysis
//
// Key features:
// - Watches cluster resources and maintains an in-memory cache synchronized with the cluster state
// - Supports both same-namespace parent-child relationships and cross-namespace relationships
// - Uses pre-computed indexes for efficient hierarchy traversal without full cluster scans
// - Provides configurable namespaces and resource filtering
// - Handles dynamic resource discovery including CRDs
//
// Cross-namespace hierarchy traversal:
// The cache supports discovering namespaced resources that are owned by cluster-scoped resources.
// This is essential for tracking resources like namespaced Deployments owned by cluster-scoped
// custom resources.
//
// The parentUIDToChildren index enables efficient O(1) cross-namespace traversal by mapping
// any resource's UID to its direct children, eliminating the need for O(n) graph building.
package cache

import (
@@ -184,9 +208,10 @@ func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCa
		eventHandlers:         map[uint64]OnEventHandler{},
		processEventsHandlers: map[uint64]OnProcessEventsHandler{},
		log:                   log,
		listRetryLimit:        1,
		listRetryUseBackoff:   false,
		listRetryFunc:         ListRetryFuncNever,
		listRetryLimit:        1,
		listRetryUseBackoff:   false,
		listRetryFunc:         ListRetryFuncNever,
		parentUIDToChildren:   make(map[types.UID][]kube.ResourceKey),
	}
	for i := range opts {
		opts[i](cache)
@@ -245,6 +270,11 @@ type clusterCache struct {
	gvkParser *managedfields.GvkParser

	respectRBAC int

	// Parent-to-children index for O(1) hierarchy traversal
	// Maps any resource's UID to its direct children's ResourceKeys
	// Eliminates need for O(n) graph building during hierarchy traversal
	parentUIDToChildren map[types.UID][]kube.ResourceKey
}

type clusterCacheSync struct {
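To make the role of the new parentUIDToChildren field concrete, here is a small self-contained sketch. It is illustrative only: the UIDs, kinds, and names are invented, and resourceKey stands in for kube.ResourceKey. The point is that "which resources are direct children of this parent?" becomes a single map lookup keyed by the parent's UID, and namespaced children of a cluster-scoped parent are found the same way as any other child.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// resourceKey is a stand-in for kube.ResourceKey, just for illustration.
type resourceKey struct{ Group, Kind, Namespace, Name string }

func main() {
	// parentUIDToChildren maps a parent's UID to the keys of its direct children,
	// regardless of which namespace each child lives in.
	parentUIDToChildren := map[types.UID][]resourceKey{
		"cluster-parent-123": {
			{Kind: "Deployment", Namespace: "team-a", Name: "api"},
			{Kind: "ClusterRole", Namespace: "", Name: "viewer"},
		},
	}

	// One map lookup returns the direct children of a cluster-scoped parent,
	// including namespaced ones; descendants are found by repeating the lookup per child.
	for _, child := range parentUIDToChildren["cluster-parent-123"] {
		fmt.Printf("%s/%s in namespace %q\n", child.Kind, child.Name, child.Namespace)
	}
}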
@@ -444,6 +474,10 @@ func (c *clusterCache) newResource(un *unstructured.Unstructured) *Resource {

func (c *clusterCache) setNode(n *Resource) {
	key := n.ResourceKey()

	// Keep track of existing resource for index updates
	existing := c.resources[key]

	c.resources[key] = n
	ns, ok := c.nsIndex[key.Namespace]
	if !ok {
@@ -452,6 +486,10 @@ func (c *clusterCache) setNode(n *Resource) {
	}
	ns[key] = n

	// Update parent-to-children index for all resources with owner refs
	// This is always done, regardless of sync state, as it's cheap to maintain
	c.updateParentUIDToChildren(key, existing, n)

	// update inferred parent references
	if n.isInferredParentOf != nil || mightHaveInferredOwner(n) {
		for k, v := range ns {
@@ -466,6 +504,88 @@ func (c *clusterCache) setNode(n *Resource) {
	}
}

// rebuildParentToChildrenIndex rebuilds the parent-to-children index after a full sync
// This is called after initial sync to ensure all parent-child relationships are tracked
func (c *clusterCache) rebuildParentToChildrenIndex() {
	// Clear existing index
	c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)

	// Rebuild parent-to-children index from all resources with owner refs
	for _, resource := range c.resources {
		key := resource.ResourceKey()
		for _, ownerRef := range resource.OwnerRefs {
			if ownerRef.UID != "" {
				c.addToParentUIDToChildren(ownerRef.UID, key)
			}
		}
	}
}

// addToParentUIDToChildren adds a child to the parent-to-children index
func (c *clusterCache) addToParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
	// Check if child is already in the list to avoid duplicates
	children := c.parentUIDToChildren[parentUID]
	for _, existing := range children {
		if existing == childKey {
			return // Already exists, no need to add
		}
	}
	c.parentUIDToChildren[parentUID] = append(children, childKey)
}

// removeFromParentUIDToChildren removes a child from the parent-to-children index
func (c *clusterCache) removeFromParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
	children := c.parentUIDToChildren[parentUID]
	for i, existing := range children {
		if existing == childKey {
			// Remove by swapping with last element and truncating
			children[i] = children[len(children)-1]
			c.parentUIDToChildren[parentUID] = children[:len(children)-1]

			// Clean up empty entries
			if len(c.parentUIDToChildren[parentUID]) == 0 {
				delete(c.parentUIDToChildren, parentUID)
			}
			return
		}
	}
}

// updateParentUIDToChildren updates the parent-to-children index when a resource's owner refs change
func (c *clusterCache) updateParentUIDToChildren(childKey kube.ResourceKey, oldResource *Resource, newResource *Resource) {
	// Build sets of old and new parent UIDs
	oldParents := make(map[types.UID]struct{})
	if oldResource != nil {
		for _, ref := range oldResource.OwnerRefs {
			if ref.UID != "" {
				oldParents[ref.UID] = struct{}{}
			}
		}
	}

	newParents := make(map[types.UID]struct{})
	for _, ref := range newResource.OwnerRefs {
		if ref.UID != "" {
			newParents[ref.UID] = struct{}{}
		}
	}

	// Remove from parents that are no longer in owner refs
	for oldUID := range oldParents {
		if _, exists := newParents[oldUID]; !exists {
			c.removeFromParentUIDToChildren(oldUID, childKey)
		}
	}

	// Add to parents that are new in owner refs
	for newUID := range newParents {
		if _, exists := oldParents[newUID]; !exists {
			c.addToParentUIDToChildren(newUID, childKey)
		}
	}
}

// Invalidate cache and executes callback that optionally might update cache settings
func (c *clusterCache) Invalidate(opts ...UpdateSettingsFunc) {
	c.lock.Lock()
@@ -885,6 +1005,7 @@ func (c *clusterCache) sync() error {
	c.apisMeta = make(map[schema.GroupKind]*apiMeta)
	c.resources = make(map[kube.ResourceKey]*Resource)
	c.namespacedResources = make(map[schema.GroupKind]bool)
	c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
	config := c.config
	version, err := c.kubectl.GetServerVersion(config)
	if err != nil {
@@ -983,6 +1104,9 @@ func (c *clusterCache) sync() error {
		return fmt.Errorf("failed to sync cluster %s: %w", c.config.Host, err)
	}

	// Rebuild orphaned children index after all resources are loaded
	c.rebuildParentToChildrenIndex()

	c.log.Info("Cluster successfully synced")
	return nil
}
@@ -1055,10 +1179,17 @@ func (c *clusterCache) FindResources(namespace string, predicates ...func(r *Res
	return result
}

// IterateHierarchy iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree
// IterateHierarchyV2 iterates through the hierarchy of resources starting from the given keys.
// It efficiently traverses parent-child relationships, including cross-namespace relationships
// between cluster-scoped parents and namespaced children, using pre-computed indexes.
func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	// Track visited resources to avoid cycles
	visited := make(map[kube.ResourceKey]int)

	// Group keys by namespace for efficient processing
	keysPerNamespace := make(map[string][]kube.ResourceKey)
	for _, key := range keys {
		_, ok := c.resources[key]
@@ -1067,35 +1198,125 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
		}
		keysPerNamespace[key.Namespace] = append(keysPerNamespace[key.Namespace], key)
	}

	// Process namespaced resources with standard hierarchy
	for namespace, namespaceKeys := range keysPerNamespace {
		nsNodes := c.nsIndex[namespace]
		graph := buildGraph(nsNodes)
		visited := make(map[kube.ResourceKey]int)
		for _, key := range namespaceKeys {
			visited[key] = 0
		c.processNamespaceHierarchy(namespaceKeys, nsNodes, graph, visited, action)
	}

	// Process pre-computed cross-namespace children
	if clusterKeys, ok := keysPerNamespace[""]; ok {
		c.processCrossNamespaceChildren(clusterKeys, visited, action)
	}
}
// processCrossNamespaceChildren processes namespaced children of cluster-scoped resources
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries
func (c *clusterCache) processCrossNamespaceChildren(
	clusterScopedKeys []kube.ResourceKey,
	visited map[kube.ResourceKey]int,
	action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
	for _, clusterKey := range clusterScopedKeys {
		// Get cluster-scoped resource to access its UID
		clusterResource := c.resources[clusterKey]
		if clusterResource == nil {
			continue
		}
		for _, key := range namespaceKeys {
			// The check for existence of key is done above.
			res := c.resources[key]
			if visited[key] == 2 || !action(res, nsNodes) {

		// Use parent-to-children index for O(1) lookup of direct children
		childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
		for _, childKey := range childKeys {
			child := c.resources[childKey]
			if child == nil || visited[childKey] != 0 {
				continue
			}
			visited[key] = 1
			if _, ok := graph[key]; ok {
				for _, child := range graph[key] {
					if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
						child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
							if err != nil {
								c.log.V(2).Info(err.Error())
								return false
							}
							return action(child, namespaceResources)
						})
					}

			// Get namespace nodes for this child
			nsNodes := c.nsIndex[childKey.Namespace]
			if nsNodes == nil {
				continue
			}

			// Process this child
			if action(child, nsNodes) {
				visited[childKey] = 1
				// Recursively process descendants using index-based traversal
				c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
				visited[childKey] = 2
			}
		}
	}
}

// iterateChildrenUsingIndex recursively processes a resource's children using the parentUIDToChildren index
// This replaces graph-based traversal with O(1) index lookups
func (c *clusterCache) iterateChildrenUsingIndex(
	parent *Resource,
	nsNodes map[kube.ResourceKey]*Resource,
	visited map[kube.ResourceKey]int,
	action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
	// Look up direct children of this parent using the index
	childKeys := c.parentUIDToChildren[parent.Ref.UID]
	for _, childKey := range childKeys {
		if visited[childKey] != 0 {
			continue // Already visited or in progress
		}

		child := c.resources[childKey]
		if child == nil {
			continue
		}

		// Only process children in the same namespace (for within-namespace traversal)
		// Cross-namespace children are handled by the outer loop in processCrossNamespaceChildren
		if child.Ref.Namespace != parent.Ref.Namespace {
			continue
		}

		if action(child, nsNodes) {
			visited[childKey] = 1
			// Recursively process this child's descendants
			c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
			visited[childKey] = 2
		}
	}
}

// processNamespaceHierarchy processes hierarchy for keys within a single namespace
func (c *clusterCache) processNamespaceHierarchy(
	namespaceKeys []kube.ResourceKey,
	nsNodes map[kube.ResourceKey]*Resource,
	graph map[kube.ResourceKey]map[types.UID]*Resource,
	visited map[kube.ResourceKey]int,
	action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
	for _, key := range namespaceKeys {
		visited[key] = 0
	}
	for _, key := range namespaceKeys {
		res := c.resources[key]
		if visited[key] == 2 || !action(res, nsNodes) {
			continue
		}
		visited[key] = 1
		if _, ok := graph[key]; ok {
			for _, child := range graph[key] {
				if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
					child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
						if err != nil {
							c.log.V(2).Info(err.Error())
							return false
						}
						return action(child, namespaceResources)
					})
				}
			}
			visited[key] = 2
		}
		visited[key] = 2
	}
}
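One detail the traversal code above relies on but never spells out is the meaning of the integer values in the visited map: 0 means a key has not been seen, 1 means it is currently being processed (so meeting it again indicates a cycle in the owner references), and 2 means it has been fully processed. The following standalone sketch, using made-up string IDs rather than the cache's real types, illustrates that pattern:

package main

import "fmt"

// walk demonstrates the tri-state "visited" marking used by the traversal:
// 0 = not seen, 1 = in progress, 2 = fully processed. The non-zero check is
// what breaks ownerRef cycles and prevents duplicate visits.
func walk(uid string, children map[string][]string, visited map[string]int) {
	if visited[uid] != 0 {
		return // already processed, or an ancestor currently being processed (a cycle)
	}
	visited[uid] = 1
	for _, child := range children[uid] {
		walk(child, children, visited)
	}
	visited[uid] = 2
	fmt.Println("done:", uid)
}

func main() {
	children := map[string][]string{
		"parent":  {"child-a", "child-b"},
		"child-a": {"parent"}, // artificial cycle to show the guard
	}
	walk("parent", children, map[string]int{})
}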
@@ -1106,7 +1327,7 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
		nodesByUID[node.Ref.UID] = append(nodesByUID[node.Ref.UID], node)
	}

	// In graph, they key is the parent and the value is a list of children.
	// In graph, the key is the parent and the value is a list of children.
	graph := make(map[kube.ResourceKey]map[types.UID]*Resource)

	// Loop through all nodes, calling each one "childNode," because we're only bothering with it if it has a parent.
@@ -1132,20 +1353,22 @@ func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map
			uidNodes, ok := nodesByUID[ownerRef.UID]
			if ok {
				for _, uidNode := range uidNodes {
					// Cache ResourceKey() to avoid repeated expensive calls
					uidNodeKey := uidNode.ResourceKey()
					// Update the graph for this owner to include the child.
					if _, ok := graph[uidNode.ResourceKey()]; !ok {
						graph[uidNode.ResourceKey()] = make(map[types.UID]*Resource)
					if _, ok := graph[uidNodeKey]; !ok {
						graph[uidNodeKey] = make(map[types.UID]*Resource)
					}
					r, ok := graph[uidNode.ResourceKey()][childNode.Ref.UID]
					r, ok := graph[uidNodeKey][childNode.Ref.UID]
					if !ok {
						graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode
						graph[uidNodeKey][childNode.Ref.UID] = childNode
					} else if r != nil {
						// The object might have multiple children with the same UID (e.g. replicaset from apps and extensions group).
						// It is ok to pick any object, but we need to make sure we pick the same child after every refresh.
						key1 := r.ResourceKey()
						key2 := childNode.ResourceKey()
						if strings.Compare(key1.String(), key2.String()) > 0 {
							graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode
							graph[uidNodeKey][childNode.Ref.UID] = childNode
						}
					}
				}
@@ -1365,6 +1588,14 @@ func (c *clusterCache) onNodeRemoved(key kube.ResourceKey) {
			}
		}
	}

	// Clean up parent-to-children index
	for _, ownerRef := range existing.OwnerRefs {
		if ownerRef.UID != "" {
			c.removeFromParentUIDToChildren(ownerRef.UID, key)
		}
	}

	for _, h := range c.getResourceUpdatedHandlers() {
		h(nil, existing, ns)
	}
gitops-engine/pkg/cache/cluster_test.go (vendored) | 703
@@ -19,8 +19,10 @@ import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -91,11 +93,11 @@ func newClusterWithOptions(_ testing.TB, opts []UpdateSettingsFunc, objs ...runt
	client.PrependReactor("list", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
		handled, ret, err = reactor.React(action)
		if err != nil || !handled {
			return
			return handled, ret, fmt.Errorf("reactor failed: %w", err)
		}
		// make sure list response have resource version
		ret.(metav1.ListInterface).SetResourceVersion("123")
		return
		return handled, ret, nil
	})

	apiResources := []kube.APIResourceInfo{{
@@ -189,6 +191,104 @@ func Benchmark_sync(t *testing.B) {
	}
}

// Benchmark_sync_CrossNamespace tests sync performance with cross-namespace relationships.
// This measures the one-time cost of building cross-namespace indexes during cache synchronization.
func Benchmark_sync_CrossNamespace(b *testing.B) {
	testCases := []struct {
		name                         string
		totalNamespaces              int
		resourcesPerNamespace        int
		namespacesWithCrossNS        int // Number of namespaces with cross-NS children
		crossNSResourcesPerNamespace int // Cross-NS children in each affected namespace
	}{
		// Baseline
		{"50NS_0pct_100perNS", 50, 100, 0, 0},

		// Primary dimension: Percentage of namespaces with cross-NS children
		{"50NS_2pct_100perNS", 50, 100, 1, 10},
		{"50NS_10pct_100perNS", 50, 100, 5, 10},
		{"50NS_20pct_100perNS", 50, 100, 10, 10},
	}

	for _, tc := range testCases {
		b.Run(tc.name, func(b *testing.B) {
			resources := []runtime.Object{}

			// Create cluster-scoped parents (ClusterRoles)
			numClusterParents := 100
			clusterUIDs := make(map[string]types.UID)
			for i := 0; i < numClusterParents; i++ {
				uid := types.UID(fmt.Sprintf("cluster-uid-%d", i))
				clusterUIDs[fmt.Sprintf("cluster-role-%d", i)] = uid
				resources = append(resources, &rbacv1.ClusterRole{
					ObjectMeta: metav1.ObjectMeta{
						Name: fmt.Sprintf("cluster-role-%d", i),
						UID:  uid,
					},
				})
			}

			// Create namespaced resources
			for ns := 0; ns < tc.totalNamespaces; ns++ {
				namespace := fmt.Sprintf("namespace-%d", ns)
				hasCrossNS := ns < tc.namespacesWithCrossNS
				regularPods := tc.resourcesPerNamespace
				crossNSPods := 0

				if hasCrossNS {
					regularPods = tc.resourcesPerNamespace - tc.crossNSResourcesPerNamespace
					crossNSPods = tc.crossNSResourcesPerNamespace
				}

				// Regular pods without cross-namespace parents
				for i := 0; i < regularPods; i++ {
					resources = append(resources, &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      fmt.Sprintf("pod-%d", i),
							Namespace: namespace,
							UID:       types.UID(fmt.Sprintf("pod-uid-%d-%d", ns, i)),
						},
					})
				}

				// Pods with cross-namespace parents
				for i := 0; i < crossNSPods; i++ {
					clusterRoleName := fmt.Sprintf("cluster-role-%d", i%numClusterParents)
					resources = append(resources, &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      fmt.Sprintf("cross-ns-pod-%d", i),
							Namespace: namespace,
							UID:       types.UID(fmt.Sprintf("cross-ns-pod-uid-%d-%d", ns, i)),
							OwnerReferences: []metav1.OwnerReference{{
								APIVersion: "rbac.authorization.k8s.io/v1",
								Kind:       "ClusterRole",
								Name:       clusterRoleName,
								UID:        clusterUIDs[clusterRoleName],
							}},
						},
					})
				}
			}

			// Need to add API resources for ClusterRole
			c := newCluster(b, resources...).WithAPIResources([]kube.APIResourceInfo{{
				GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
				GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
				Meta:                 metav1.APIResource{Namespaced: false},
			}})

			b.ResetTimer()
			b.ReportAllocs()

			for n := 0; n < b.N; n++ {
				err := c.sync()
				require.NoError(b, err)
			}
		})
	}
}

func TestEnsureSynced(t *testing.T) {
	obj1 := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
@@ -302,12 +402,16 @@ func TestStatefulSetOwnershipInferred(t *testing.T) {
			tc.cluster.lock.Lock()
			defer tc.cluster.lock.Unlock()

			refs := tc.cluster.resources[kube.GetResourceKey(pvc)].OwnerRefs
			resource := tc.cluster.resources[kube.GetResourceKey(pvc)]
			if resource == nil {
				return false // Resource not ready yet, keep retrying
			}
			refs := resource.OwnerRefs
			if tc.expectNoOwner {
				return len(refs) == 0
			}
			return assert.ElementsMatch(t, refs, tc.expectedRefs)
		}, 5*time.Second, 10*time.Millisecond, "Expected PVC to have correct owner reference")
		}, 5*time.Second, 20*time.Millisecond, "Expected PVC to have correct owner reference")
		})
	}
}
@@ -1045,7 +1149,7 @@ func testDeploy() *appsv1.Deployment {
	}
}

func TestIterateHierachyV2(t *testing.T) {
func TestIterateHierarchyV2(t *testing.T) {
	cluster := newCluster(t, testPod1(), testPod2(), testRS(), testExtensionsRS(), testDeploy())
	err := cluster.EnsureSynced()
	require.NoError(t, err)
@@ -1157,6 +1261,307 @@ func TestIterateHierachyV2(t *testing.T) {
	})
}

func testClusterParent() *corev1.Namespace {
	return &corev1.Namespace{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Namespace",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "test-cluster-parent",
			UID:             "cluster-parent-123",
			ResourceVersion: "123",
		},
	}
}

func testNamespacedChild() *corev1.Pod {
	return &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "namespaced-child",
			Namespace:       "test-namespace",
			UID:             "namespaced-child-456",
			ResourceVersion: "123",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "test-cluster-parent",
				UID:        "cluster-parent-123",
			}},
		},
	}
}

func testClusterChild() *rbacv1.ClusterRole {
	return &rbacv1.ClusterRole{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRole",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "cluster-child",
			UID:             "cluster-child-789",
			ResourceVersion: "123",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "test-cluster-parent",
				UID:        "cluster-parent-123",
			}},
		},
	}
}
func TestIterateHierarchyV2_ClusterScopedParent_FindsAllChildren(t *testing.T) {
	// Test that cluster-scoped parents automatically find all their children (both cluster-scoped and namespaced)
	// This is the core behavior of the new implementation - cross-namespace relationships are always tracked
	cluster := newCluster(t, testClusterParent(), testNamespacedChild(), testClusterChild()).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}, {
		GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
		GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	keys := []kube.ResourceKey{}
	cluster.IterateHierarchyV2(
		[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testClusterParent()))},
		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
			keys = append(keys, resource.ResourceKey())
			return true
		},
	)

	// Should find the parent and all its children (both cluster-scoped and namespaced)
	expected := []kube.ResourceKey{
		kube.GetResourceKey(mustToUnstructured(testClusterParent())),
		kube.GetResourceKey(mustToUnstructured(testClusterChild())),
		kube.GetResourceKey(mustToUnstructured(testNamespacedChild())),
	}
	assert.ElementsMatch(t, expected, keys)
}

func TestIterateHierarchyV2_ClusterScopedParentOnly_InferredUID(t *testing.T) {
	// Test that passing only a cluster-scoped parent finds children even with inferred UIDs.
	// This should never happen but we coded defensively for this case, and at worst it would link a child
	// to the wrong parent if there were multiple parents with the same name (i.e. deleted and recreated).
	namespacedChildNoUID := &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "namespaced-child-no-uid",
			Namespace:       "test-namespace",
			UID:             "namespaced-child-789",
			ResourceVersion: "123",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "test-cluster-parent",
				// Note: No UID here - will need to be inferred
			}},
		},
	}

	cluster := newCluster(t, testClusterParent(), namespacedChildNoUID, testClusterChild()).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}, {
		GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
		GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	keys := []kube.ResourceKey{}
	// Test with all namespaces - need to pass both cluster parent and namespaced children
	// as explicit keys to find them all
	cluster.IterateHierarchyV2(
		[]kube.ResourceKey{
			kube.GetResourceKey(mustToUnstructured(testClusterParent())),
			kube.GetResourceKey(mustToUnstructured(namespacedChildNoUID)),
		},
		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
			keys = append(keys, resource.ResourceKey())
			return true
		},
	)

	// Should find the parent and all its children, even with inferred UID
	assert.ElementsMatch(t, []kube.ResourceKey{
		kube.GetResourceKey(mustToUnstructured(testClusterParent())),
		kube.GetResourceKey(mustToUnstructured(namespacedChildNoUID)),
		kube.GetResourceKey(mustToUnstructured(testClusterChild())),
	}, keys)
}
func TestOrphanedChildrenCleanup(t *testing.T) {
	// Test that parent-to-children index is properly cleaned up when resources are deleted
	clusterParent := testClusterParent()
	namespacedChild := testNamespacedChild()

	cluster := newCluster(t, clusterParent, namespacedChild).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	// Verify child is tracked in parentUIDToChildren index
	cluster.lock.RLock()
	childKey := kube.GetResourceKey(mustToUnstructured(namespacedChild))
	parentUID := clusterParent.GetUID()

	// Check that the child is in parentUIDToChildren
	children, ok := cluster.parentUIDToChildren[parentUID]
	require.True(t, ok, "parent should have entry in parentUIDToChildren")
	require.Contains(t, children, childKey, "child should be in parent's children list")
	cluster.lock.RUnlock()

	// Delete the child
	cluster.lock.Lock()
	cluster.onNodeRemoved(childKey)
	cluster.lock.Unlock()

	// Verify cleanup: child removed from parentUIDToChildren
	cluster.lock.RLock()
	children, ok = cluster.parentUIDToChildren[parentUID]
	if ok {
		assert.NotContains(t, children, childKey, "child should be removed from parent's children list")
	}
	cluster.lock.RUnlock()

	// Re-add the child and verify it re-populates correctly
	cluster.lock.Lock()
	cluster.setNode(cluster.newResource(mustToUnstructured(namespacedChild)))
	cluster.lock.Unlock()

	cluster.lock.RLock()
	children, ok = cluster.parentUIDToChildren[parentUID]
	require.True(t, ok, "parent should be back in parentUIDToChildren")
	require.Contains(t, children, childKey, "child should be back in parent's children list")
	cluster.lock.RUnlock()
}

func TestOrphanedChildrenIndex_OwnerRefLifecycle(t *testing.T) {
	// Test realistic scenarios of owner references being added and removed
	clusterParent := testClusterParent()

	// Start with a child that has NO owner reference
	childNoOwner := &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "child-no-owner",
			Namespace:       "test-namespace",
			UID:             "child-uid-123",
			ResourceVersion: "1",
			// No OwnerReferences
		},
	}

	cluster := newCluster(t, clusterParent, childNoOwner).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	childKey := kube.GetResourceKey(mustToUnstructured(childNoOwner))
	parentUID := clusterParent.GetUID()

	// Verify child is NOT tracked initially (no owner ref)
	cluster.lock.RLock()
	children, ok := cluster.parentUIDToChildren[parentUID]
	if ok {
		assert.NotContains(t, children, childKey, "child without owner ref should not be in parentUIDToChildren")
	}
	cluster.lock.RUnlock()

	// Simulate controller adding owner reference (e.g., adoption)
	childWithOwner := &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "child-no-owner",
			Namespace:       "test-namespace",
			UID:             "child-uid-123",
			ResourceVersion: "2",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "v1",
				Kind:       "Namespace",
				Name:       "test-cluster-parent",
				UID:        "cluster-parent-123",
			}},
		},
	}

	cluster.lock.Lock()
	cluster.setNode(cluster.newResource(mustToUnstructured(childWithOwner)))
	cluster.lock.Unlock()

	// Verify child is NOW tracked (owner ref added)
	cluster.lock.RLock()
	children, ok = cluster.parentUIDToChildren[parentUID]
	require.True(t, ok, "parent should have entry in parentUIDToChildren after adding owner ref")
	require.Contains(t, children, childKey, "child should be in parent's children list after adding owner ref")
	cluster.lock.RUnlock()

	// Simulate removing owner reference (e.g., parent deletion with orphanDependents: true)
	childWithoutOwnerAgain := &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "child-no-owner",
			Namespace:       "test-namespace",
			UID:             "child-uid-123",
			ResourceVersion: "3",
			// OwnerReferences removed
		},
	}

	cluster.lock.Lock()
	cluster.setNode(cluster.newResource(mustToUnstructured(childWithoutOwnerAgain)))
	cluster.lock.Unlock()

	// Verify child is NO LONGER tracked (owner ref removed)
	cluster.lock.RLock()
	children, ok = cluster.parentUIDToChildren[parentUID]
	if ok {
		assert.NotContains(t, children, childKey, "child should be removed from parentUIDToChildren after removing owner ref")
	}
	cluster.lock.RUnlock()

	// Verify empty entry cleanup: parent entry should be cleaned up if it has no children
	cluster.lock.RLock()
	children, ok = cluster.parentUIDToChildren[parentUID]
	if ok {
		assert.Empty(t, children, "parent's children list should be empty or cleaned up")
	}
	cluster.lock.RUnlock()
}

// Test_watchEvents_Deadlock validates that starting watches will not create a deadlock
// caused by using improper locking in various callback methods when there is a high load on the
// system.
@@ -1292,3 +1697,291 @@ func BenchmarkIterateHierarchyV2(b *testing.B) {
		})
	}
}

// buildClusterParentTestResourceMap creates test resources with configurable namespace distribution.
// Parameters:
// - clusterParents: number of cluster-scoped parent resources (ClusterRoles)
// - totalNamespaces: total number of namespaces to create
// - namespacesWithCrossNS: how many of those namespaces contain cross-namespace children
// - resourcesPerNamespace: resources in each namespace
// - crossNSResourcesPerNamespace: how many cross-namespace children in each affected namespace
func buildClusterParentTestResourceMap(
	clusterParents, totalNamespaces, namespacesWithCrossNS, resourcesPerNamespace, crossNSResourcesPerNamespace int,
) map[kube.ResourceKey]*Resource {
	resources := make(map[kube.ResourceKey]*Resource)

	// Create cluster-scoped parents (ClusterRoles)
	clusterParentUIDs := make(map[string]string)
	for i := 0; i < clusterParents; i++ {
		clusterRoleName := fmt.Sprintf("cluster-role-%d", i)
		uid := uuid.New().String()
		clusterParentUIDs[clusterRoleName] = uid

		key := kube.ResourceKey{
			Group:     "rbac.authorization.k8s.io",
			Kind:      "ClusterRole",
			Namespace: "",
			Name:      clusterRoleName,
		}

		resourceYaml := fmt.Sprintf(`
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: %s
  uid: %s
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list"]`, clusterRoleName, uid)

		resources[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
	}

	// Generate namespace names
	namespaces := make([]string, totalNamespaces)
	for i := 0; i < totalNamespaces; i++ {
		namespaces[i] = fmt.Sprintf("ns-%d", i)
	}

	// For each namespace
	for nsIdx, namespace := range namespaces {
		hasCrossNS := nsIdx < namespacesWithCrossNS
		regularPodsInNS := resourcesPerNamespace
		crossNSPodsInNS := 0

		if hasCrossNS {
			regularPodsInNS = resourcesPerNamespace - crossNSResourcesPerNamespace
			crossNSPodsInNS = crossNSResourcesPerNamespace
		}

		// Create regular namespaced resources (Pods)
		for i := 0; i < regularPodsInNS; i++ {
			name := fmt.Sprintf("pod-%s-%d", namespace, i)
			uid := uuid.New().String()

			key := kube.ResourceKey{
				Namespace: namespace,
				Name:      name,
				Kind:      "Pod",
			}

			resourceYaml := fmt.Sprintf(`
apiVersion: v1
kind: Pod
metadata:
  namespace: %s
  name: %s
  uid: %s`, namespace, name, uid)

			resources[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
		}

		// Create cross-namespace children if this namespace has them
		for i := 0; i < crossNSPodsInNS; i++ {
			podName := fmt.Sprintf("cross-ns-pod-%s-%d", namespace, i)
			clusterRoleIndex := i % clusterParents
			clusterRoleName := fmt.Sprintf("cluster-role-%d", clusterRoleIndex)
			parentUID := clusterParentUIDs[clusterRoleName]
			uid := uuid.New().String()

			key := kube.ResourceKey{
				Namespace: namespace,
				Name:      podName,
				Kind:      "Pod",
			}

			resourceYaml := fmt.Sprintf(`
apiVersion: v1
kind: Pod
metadata:
  name: %s
  namespace: %s
  uid: %s
  ownerReferences:
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    name: %s
    uid: %s`, podName, namespace, uid, clusterRoleName, parentUID)

			resources[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
		}
	}

	return resources
}

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// BenchmarkIterateHierarchyV2_ClusterParentTraversal benchmarks full hierarchy traversal
// starting from cluster-scoped parents with varying percentages of namespaces containing
// cross-namespace children. This tests the actual performance impact of the cross-namespace
// relationship tracking feature.
func BenchmarkIterateHierarchyV2_ClusterParentTraversal(b *testing.B) {
	testCases := []struct {
		name                         string
		totalNamespaces              int
		resourcesPerNamespace        int
		namespacesWithCrossNS        int // Number of namespaces with cross-NS children
		crossNSResourcesPerNamespace int // Cross-NS children in each affected namespace
	}{
		// Baseline: 0% of namespaces have cross-NS children
		{"50NS_0pct_100perNS", 50, 100, 0, 0},

		// Primary dimension: Percentage of namespaces with cross-NS children
		// 5,000 total resources (50 NS × 100 resources/NS), 10 cross-NS children per affected namespace
		{"50NS_2pct_100perNS_10cross", 50, 100, 1, 10},   // 2% of namespaces (1/50)
		{"50NS_4pct_100perNS_10cross", 50, 100, 2, 10},   // 4% of namespaces (2/50)
		{"50NS_10pct_100perNS_10cross", 50, 100, 5, 10},  // 10% of namespaces (5/50)
		{"50NS_20pct_100perNS_10cross", 50, 100, 10, 10}, // 20% of namespaces (10/50)

		// Secondary dimension: Within a namespace, % of resources that are cross-NS
		// 5,000 total resources, 2% of namespaces (1/50) have cross-NS children
		{"50NS_2pct_100perNS_10cross", 50, 100, 1, 10}, // 10% of namespace resources (10/100)
		{"50NS_2pct_100perNS_25cross", 50, 100, 1, 25}, // 25% of namespace resources (25/100)
		{"50NS_2pct_100perNS_50cross", 50, 100, 1, 50}, // 50% of namespace resources (50/100)

		// Edge cases
		{"100NS_1pct_100perNS_10cross", 100, 100, 1, 10},  // 1% of namespaces (1/100) - extreme clustering
		{"50NS_100pct_100perNS_10cross", 50, 100, 50, 10}, // 100% of namespaces - worst case
	}

	for _, tc := range testCases {
		b.Run(tc.name, func(b *testing.B) {
			cluster := newCluster(b).WithAPIResources([]kube.APIResourceInfo{{
				GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
				GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
				Meta:                 metav1.APIResource{Namespaced: false},
			}, {
				GroupKind:            schema.GroupKind{Group: "", Kind: "Pod"},
				GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
				Meta:                 metav1.APIResource{Namespaced: true},
			}})

			// CRITICAL: Initialize namespacedResources so setNode will populate orphanedChildren index
			cluster.namespacedResources = map[schema.GroupKind]bool{
				{Group: "", Kind: "Pod"}:                                  true,
				{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: false,
			}

			clusterParents := 100 // Fixed number of cluster-scoped resources

			testResources := buildClusterParentTestResourceMap(
				clusterParents,
				tc.totalNamespaces,
				tc.namespacesWithCrossNS,
				tc.resourcesPerNamespace,
				tc.crossNSResourcesPerNamespace,
			)

			// Add resources to cache - this will populate orphanedChildren index
			for _, resource := range testResources {
				cluster.setNode(resource)
			}

			// Verify indexes are populated (sanity check)
			if tc.namespacesWithCrossNS > 0 {
				if len(cluster.parentUIDToChildren) == 0 {
					b.Fatal("parentUIDToChildren index not populated - benchmark setup is broken")
				}
			}

			// Always start from a cluster-scoped parent to test cross-namespace traversal
			startKey := kube.ResourceKey{
				Group:     "rbac.authorization.k8s.io",
				Kind:      "ClusterRole",
				Namespace: "",
				Name:      "cluster-role-0",
			}

			b.ResetTimer()
			b.ReportAllocs()

			for n := 0; n < b.N; n++ {
				cluster.IterateHierarchyV2([]kube.ResourceKey{startKey}, func(_ *Resource, _ map[kube.ResourceKey]*Resource) bool {
					return true
				})
			}
		})
	}
}
func TestIterateHierarchyV2_NoDuplicatesInSameNamespace(t *testing.T) {
	// Create a parent-child relationship in the same namespace
	parent := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
		ObjectMeta: metav1.ObjectMeta{
			Name: "parent", Namespace: "default", UID: "parent-uid",
		},
	}
	child := &appsv1.ReplicaSet{
		TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "ReplicaSet"},
		ObjectMeta: metav1.ObjectMeta{
			Name: "child", Namespace: "default", UID: "child-uid",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "apps/v1", Kind: "Deployment", Name: "parent", UID: "parent-uid",
			}},
		},
	}

	cluster := newCluster(t, parent, child)
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	visitCount := make(map[string]int)
	cluster.IterateHierarchyV2(
		[]kube.ResourceKey{
			kube.GetResourceKey(mustToUnstructured(parent)),
			kube.GetResourceKey(mustToUnstructured(child)),
		},
		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
			visitCount[resource.Ref.Name]++
			return true
		},
	)

	// Each resource should be visited exactly once
	assert.Equal(t, 1, visitCount["parent"], "parent should be visited once")
	assert.Equal(t, 1, visitCount["child"], "child should be visited once")
}

func TestIterateHierarchyV2_NoDuplicatesCrossNamespace(t *testing.T) {
	// Test that cross-namespace parent-child relationships don't cause duplicates
	visitCount := make(map[string]int)

	cluster := newCluster(t, testClusterParent(), testNamespacedChild(), testClusterChild()).WithAPIResources([]kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}, {
		GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
		GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
		Meta:                 metav1.APIResource{Namespaced: false},
	}})
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	cluster.IterateHierarchyV2(
		[]kube.ResourceKey{
			kube.GetResourceKey(mustToUnstructured(testClusterParent())),
			kube.GetResourceKey(mustToUnstructured(testNamespacedChild())),
			kube.GetResourceKey(mustToUnstructured(testClusterChild())),
		},
		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
			visitCount[resource.Ref.Name]++
			return true
		},
	)

	// Each resource should be visited exactly once, even with cross-namespace relationships
	assert.Equal(t, 1, visitCount["test-cluster-parent"], "cluster parent should be visited once")
	assert.Equal(t, 1, visitCount["namespaced-child"], "namespaced child should be visited once")
	assert.Equal(t, 1, visitCount["cluster-child"], "cluster child should be visited once")
}
gitops-engine/pkg/cache/mocks/ClusterCache.go (generated, vendored) | 2
@@ -563,7 +563,7 @@ func (_c *ClusterCache_IsNamespaced_Call) RunAndReturn(run func(gk schema.GroupK
	return _c
}

// IterateHierarchyV2 provides a mock function for the type ClusterCache
// IterateHierarchyV2 provides a mock function with given fields: keys, action, orphanedResourceNamespace
func (_mock *ClusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *cache.Resource, namespaceResources map[kube.ResourceKey]*cache.Resource) bool) {
	_mock.Called(keys, action)
	return
gitops-engine/pkg/cache/resource.go (vendored) | 6
@@ -91,9 +91,9 @@ func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*R
	if !ok || children == nil {
		return
	}
	for _, c := range children {
		childKey := c.ResourceKey()
		child := ns[childKey]
	for _, child := range children {
		childKey := child.ResourceKey()
		// For cross-namespace relationships, child might not be in ns, so use it directly from graph
		switch visited[childKey] {
		case 1:
			// Since we encountered a node that we're currently processing, we know we have a circular dependency.