package cache

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"

	"golang.org/x/sync/semaphore"
	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic/fake"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	testcore "k8s.io/client-go/testing"
	"sigs.k8s.io/yaml"

	"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
	"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube/kubetest"
)

// mustToUnstructured converts obj to *unstructured.Unstructured and panics on
// failure. For test fixtures where the conversion is known to succeed.
func mustToUnstructured(obj any) *unstructured.Unstructured {
	un, err := kube.ToUnstructured(obj)
	if err != nil {
		panic(err)
	}
	return un
}

// strToUnstructured parses a YAML (or JSON) manifest string into an
// *unstructured.Unstructured, panicking on parse failure.
func strToUnstructured(jsonStr string) *unstructured.Unstructured {
	obj := make(map[string]any)
	err := yaml.Unmarshal([]byte(jsonStr), &obj)
	if err != nil {
		panic(err)
	}
	return &unstructured.Unstructured{Object: obj}
}

var (
	testCreationTime, _ = time.Parse(time.RFC3339, "2018-09-20T06:47:27Z")

	testService = strToUnstructured(fmt.Sprintf(`
apiVersion: v1
kind: Service
metadata:
  name: helm-guestbook
  namespace: default
  resourceVersion: "123"
  uid: "4"
  creationTimestamp: "%s"
spec:
  selector:
    app: guestbook
  type: LoadBalancer
status:
  loadBalancer:
    ingress:
    - hostname: localhost`, testCreationTime.UTC().Format(time.RFC3339)))
)

// newCluster builds a clusterCache backed by a fake dynamic client seeded with
// objs, and registers cleanup that invalidates the cache when the test ends.
func newCluster(tb testing.TB, objs ...runtime.Object) *clusterCache {
	tb.Helper()
	cache := newClusterWithOptions(tb, []UpdateSettingsFunc{}, objs...)

	tb.Cleanup(func() {
		cache.Invalidate()
	})

	return cache
}

// newClusterWithOptions builds a clusterCache with extra UpdateSettingsFunc
// options applied after the default mock-kubectl setting. A reactor is
// prepended so every "list" response carries a resource version, which the
// cache requires to start watches.
func newClusterWithOptions(_ testing.TB, opts []UpdateSettingsFunc, objs ...runtime.Object) *clusterCache {
	client := fake.NewSimpleDynamicClient(scheme.Scheme, objs...)
	reactor := client.ReactionChain[0]
	client.PrependReactor("list", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
		handled, ret, err = reactor.React(action)
		if err != nil {
			// Only wrap real failures: wrapping a nil error with %w would
			// fabricate a non-nil error for actions the reactor didn't handle.
			return handled, ret, fmt.Errorf("reactor failed: %w", err)
		}
		if !handled {
			return handled, ret, nil
		}
		// make sure list response have resource version
		ret.(metav1.ListInterface).SetResourceVersion("123")
		return handled, ret, nil
	})

	// API resources known to the mock kubectl; extend with WithAPIResources.
	apiResources := []kube.APIResourceInfo{{
		GroupKind:            schema.GroupKind{Group: "", Kind: "Pod"},
		GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
		Meta:                 metav1.APIResource{Namespaced: true},
	}, {
		GroupKind:            schema.GroupKind{Group: "apps", Kind: "ReplicaSet"},
		GroupVersionResource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"},
		Meta:                 metav1.APIResource{Namespaced: true},
	}, {
		GroupKind:            schema.GroupKind{Group: "apps", Kind: "Deployment"},
		GroupVersionResource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
		Meta:                 metav1.APIResource{Namespaced: true},
	}, {
		GroupKind:            schema.GroupKind{Group: "apps", Kind: "StatefulSet"},
		GroupVersionResource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"},
		Meta:                 metav1.APIResource{Namespaced: true},
	}, {
		GroupKind:            schema.GroupKind{Group: "extensions", Kind: "ReplicaSet"},
		GroupVersionResource: schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"},
		Meta:                 metav1.APIResource{Namespaced: true},
	}}

	// SetKubectl goes first so caller-supplied opts can override it.
	opts = append([]UpdateSettingsFunc{
		SetKubectl(&kubetest.MockKubectlCmd{APIResources: apiResources, DynamicClient: client}),
	}, opts...)

	cache := NewClusterCache(
		&rest.Config{Host: "https://test"},
		opts...,
	)
	return cache
}

// WithAPIResources registers additional API resources with the mock kubectl
// backing the cache so tests can sync kinds beyond the default set.
func (c *clusterCache) WithAPIResources(newApiResources []kube.APIResourceInfo) *clusterCache {
	apiResources := c.kubectl.(*kubetest.MockKubectlCmd).APIResources
	apiResources = append(apiResources, newApiResources...)
	c.kubectl.(*kubetest.MockKubectlCmd).APIResources = apiResources
	return c
}

// getChildren returns the descendants of un in the cached resource hierarchy,
// excluding un itself (the hierarchy iteration yields the root first).
func getChildren(cluster *clusterCache, un *unstructured.Unstructured) []*Resource {
	hierarchy := make([]*Resource, 0)
	cluster.IterateHierarchyV2([]kube.ResourceKey{kube.GetResourceKey(un)}, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
		hierarchy = append(hierarchy, child)
		return true
	})
	return hierarchy[1:]
}

// Benchmark_sync is meant to simulate cluster initialization when populateResourceInfoHandler does nontrivial work.
func Benchmark_sync(b *testing.B) {
	resources := []runtime.Object{}
	for i := 0; i < 100; i++ {
		resources = append(resources, &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("pod-%d", i),
				Namespace: "default",
			},
		}, &appsv1.ReplicaSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("rs-%d", i),
				Namespace: "default",
			},
		}, &appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("deploy-%d", i),
				Namespace: "default",
			},
		}, &appsv1.StatefulSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("sts-%d", i),
				Namespace: "default",
			},
		})
	}

	c := newCluster(b, resources...)
	// Simulate a handler that does real work per resource.
	c.populateResourceInfoHandler = func(_ *unstructured.Unstructured, _ bool) (info any, cacheManifest bool) {
		time.Sleep(10 * time.Microsecond)
		return nil, false
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		err := c.sync()
		require.NoError(b, err)
	}
}

// Benchmark_sync_CrossNamespace tests sync performance with cross-namespace relationships.
// This measures the one-time cost of building cross-namespace indexes during cache synchronization.
func Benchmark_sync_CrossNamespace(b *testing.B) {
	testCases := []struct {
		name                         string
		totalNamespaces              int
		resourcesPerNamespace        int
		namespacesWithCrossNS        int // Number of namespaces with cross-NS children
		crossNSResourcesPerNamespace int // Cross-NS children in each affected namespace
	}{
		// Baseline
		{"50NS_0pct_100perNS", 50, 100, 0, 0},
		// Primary dimension: Percentage of namespaces with cross-NS children
		{"50NS_2pct_100perNS", 50, 100, 1, 10},
		{"50NS_10pct_100perNS", 50, 100, 5, 10},
		{"50NS_20pct_100perNS", 50, 100, 10, 10},
	}

	for _, tc := range testCases {
		b.Run(tc.name, func(b *testing.B) {
			resources := []runtime.Object{}

			// Create cluster-scoped parents (ClusterRoles)
			numClusterParents := 100
			clusterUIDs := make(map[string]types.UID)
			for i := 0; i < numClusterParents; i++ {
				uid := types.UID(fmt.Sprintf("cluster-uid-%d", i))
				clusterUIDs[fmt.Sprintf("cluster-role-%d", i)] = uid
				resources = append(resources, &rbacv1.ClusterRole{
					ObjectMeta: metav1.ObjectMeta{
						Name: fmt.Sprintf("cluster-role-%d", i),
						UID:  uid,
					},
				})
			}

			// Create namespaced resources
			for ns := 0; ns < tc.totalNamespaces; ns++ {
				namespace := fmt.Sprintf("namespace-%d", ns)
				// Only the first namespacesWithCrossNS namespaces get
				// cross-namespace children.
				hasCrossNS := ns < tc.namespacesWithCrossNS

				regularPods := tc.resourcesPerNamespace
				crossNSPods := 0
				if hasCrossNS {
					// Keep the per-namespace total constant by trading
					// regular pods for cross-NS pods.
					regularPods = tc.resourcesPerNamespace - tc.crossNSResourcesPerNamespace
					crossNSPods = tc.crossNSResourcesPerNamespace
				}

				// Regular pods without cross-namespace parents
				for i := 0; i < regularPods; i++ {
					resources = append(resources, &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      fmt.Sprintf("pod-%d", i),
							Namespace: namespace,
							UID:       types.UID(fmt.Sprintf("pod-uid-%d-%d", ns, i)),
						},
					})
				}

				// Pods with cross-namespace parents
				for i := 0; i < crossNSPods; i++ {
					clusterRoleName := fmt.Sprintf("cluster-role-%d", i%numClusterParents)
					resources = append(resources, &corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      fmt.Sprintf("cross-ns-pod-%d", i),
							Namespace: namespace,
							UID:       types.UID(fmt.Sprintf("cross-ns-pod-uid-%d-%d", ns, i)),
							OwnerReferences: []metav1.OwnerReference{{
								APIVersion: "rbac.authorization.k8s.io/v1",
								Kind:       "ClusterRole",
								Name:       clusterRoleName,
								UID:        clusterUIDs[clusterRoleName],
							}},
						},
					})
				}
			}

			// Need to add API resources for ClusterRole
			c := newCluster(b, resources...).WithAPIResources([]kube.APIResourceInfo{{
				GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
				GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
				Meta:                 metav1.APIResource{Namespaced: false},
			}})

			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				err := c.sync()
				require.NoError(b, err)
			}
		})
	}
}

// TestEnsureSynced verifies that EnsureSynced populates the resource map with
// every seeded object across namespaces.
func TestEnsureSynced(t *testing.T) {
	obj1 := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "helm-guestbook1",
			Namespace: "default1",
		},
	}
	obj2 := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "helm-guestbook2",
			Namespace: "default2",
		},
	}
	cluster := newCluster(t, obj1, obj2)
	err := cluster.EnsureSynced()
	require.NoError(t, err)

	cluster.lock.Lock()
	defer cluster.lock.Unlock()

	assert.Len(t, cluster.resources, 2)
	var names []string
	for k := range cluster.resources {
		names = append(names, k.Name)
	}
	assert.ElementsMatch(t, []string{"helm-guestbook1", "helm-guestbook2"}, names)
}

// TestStatefulSetOwnershipInferred verifies that a PVC whose name matches a
// StatefulSet volumeClaimTemplate pattern gets an inferred owner reference to
// that StatefulSet, with and without batched event processing.
func TestStatefulSetOwnershipInferred(t *testing.T) {
	var opts []UpdateSettingsFunc
	opts = append(opts, func(c *clusterCache) {
		c.batchEventsProcessing = true
		c.eventProcessingInterval = 1 * time.Millisecond
	})

	sts := &appsv1.StatefulSet{
		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
		ObjectMeta: metav1.ObjectMeta{UID: "123", Name: "web", Namespace: "default"},
		Spec: appsv1.StatefulSetSpec{
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
				ObjectMeta: metav1.ObjectMeta{
					Name: "www",
				},
			}},
		},
	}

	tests := []struct {
		name          string
		cluster       *clusterCache
		pvc           *corev1.PersistentVolumeClaim
		expectedRefs  []metav1.OwnerReference
		expectNoOwner bool
	}{
		{
			// PVC name does not match the "www" template prefix.
			name:    "STSTemplateNameNotMatching",
			cluster: newCluster(t, sts),
			pvc: &corev1.PersistentVolumeClaim{
				TypeMeta:   metav1.TypeMeta{Kind: kube.PersistentVolumeClaimKind},
				ObjectMeta: metav1.ObjectMeta{Name: "www1-web-0", Namespace: "default"},
			},
			expectNoOwner: true,
		},
		{
			name:    "MatchingSTSExists",
			cluster: newCluster(t, sts),
			pvc: &corev1.PersistentVolumeClaim{
				TypeMeta:   metav1.TypeMeta{Kind: kube.PersistentVolumeClaimKind},
				ObjectMeta: metav1.ObjectMeta{Name: "www-web-0", Namespace: "default"},
			},
			expectedRefs: []metav1.OwnerReference{{APIVersion: "apps/v1", Kind: kube.StatefulSetKind, Name: "web", UID: "123"}},
		},
		{
			name:    "STSTemplateNameNotMatchingWithBatchProcessing",
			cluster: newClusterWithOptions(t, opts, sts),
			pvc: &corev1.PersistentVolumeClaim{
				TypeMeta:   metav1.TypeMeta{Kind: kube.PersistentVolumeClaimKind},
				ObjectMeta: metav1.ObjectMeta{Name: "www1-web-0", Namespace: "default"},
			},
			expectNoOwner: true,
		},
		{
			name:    "MatchingSTSExistsWithBatchProcessing",
			cluster: newClusterWithOptions(t, opts, sts),
			pvc: &corev1.PersistentVolumeClaim{
				TypeMeta:   metav1.TypeMeta{Kind: kube.PersistentVolumeClaimKind},
				ObjectMeta: metav1.ObjectMeta{Name: "www-web-0", Namespace: "default"},
			},
			expectedRefs: []metav1.OwnerReference{{APIVersion: "apps/v1", Kind: kube.StatefulSetKind, Name: "web", UID: "123"}},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.cluster.EnsureSynced()
			require.NoError(t, err)

			pvc := mustToUnstructured(tc.pvc)
			tc.cluster.recordEvent(watch.Added, pvc)

			// Event processing may be asynchronous (batch mode), so poll until
			// the PVC shows up with the expected owner refs.
			require.Eventually(t, func() bool {
				tc.cluster.lock.Lock()
				defer tc.cluster.lock.Unlock()
				resource := tc.cluster.resources[kube.GetResourceKey(pvc)]
				if resource == nil {
					return false // Resource not ready yet, keep retrying
				}
				refs := resource.OwnerRefs
				if tc.expectNoOwner {
					return len(refs) == 0
				}
				return assert.ElementsMatch(t, refs, tc.expectedRefs)
			}, 5*time.Second, 20*time.Millisecond, "Expected PVC to have correct owner reference")
		})
	}
}

// TestStatefulSetPVC_ParentToChildrenIndex verifies that inferred StatefulSet → PVC
// relationships are correctly captured in the parentUIDToChildren index during initial sync.
//
// The index is updated inline when inferred owner refs are added in setNode()
// (see the inferred parent handling section in clusterCache.setNode).
func TestStatefulSetPVC_ParentToChildrenIndex(t *testing.T) {
	stsUID := types.UID("sts-uid-123")

	// StatefulSet with volumeClaimTemplate named "data"
	sts := &appsv1.StatefulSet{
		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
		ObjectMeta: metav1.ObjectMeta{UID: stsUID, Name: "web", Namespace: "default"},
		Spec: appsv1.StatefulSetSpec{
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
				ObjectMeta: metav1.ObjectMeta{Name: "data"},
			}},
		},
	}

	// PVCs that match the StatefulSet's volumeClaimTemplate pattern: