Compare commits

..

3 Commits

Author SHA1 Message Date
Dan Garfield
2b4291ba14 Remove accidental spaces
Co-authored-by: Dan Garfield <dan@codefresh.io>
Signed-off-by: Dan Garfield <dan@codefresh.io>
Signed-off-by: Dan Garfield <dan.garfield@octopus.com>
2026-03-26 08:27:08 +01:00
Dan Garfield
b59a03e35a Apply suggestion from @todaywasawesome
Signed-off-by: Dan Garfield <dan@codefresh.io>
Signed-off-by: Dan Garfield <dan.garfield@octopus.com>
2026-03-26 08:27:08 +01:00
Dan Garfield
40ea69a736 Fix formatting and clarity in Keycloak integration docs
Corrected formatting inconsistencies and improved clarity in the Keycloak documentation for Argo CD integration.

Signed-off-by: Dan Garfield <dan.garfield@octopus.com>
2026-03-26 08:27:07 +01:00
14 changed files with 107 additions and 570 deletions

View File

@@ -848,9 +848,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
log.Errorf("CompareAppState error getting server side diff dry run applier: %s", err)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
}
if cleanup != nil {
defer cleanup()
}
defer cleanup()
diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(applier))
}

View File

@@ -1,21 +1,21 @@
# Keycloak
Keycloak and ArgoCD integration can be configured in two ways with Client authentication and with PKCE.
Keycloak and Argo CD integration can be configured in two ways with Client authentication and with PKCE.
If you need to authenticate with the __argo-cd command line__, you must use the PKCE method.
* [Keycloak and ArgoCD with Client authentication](#keycloak-and-argocd-with-client-authentication)
* [Keycloak and ArgoCD with PKCE](#keycloak-and-argocd-with-pkce)
* [Keycloak and Argo CD with Client authentication](#keycloak-and-argo-cd-with-client-authentication)
* [Keycloak and Argo CD with PKCE](#keycloak-and-argo-cd-with-pkce)
## Keycloak and ArgoCD with Client authentication
## Keycloak and Argo CD with Client authentication
These instructions will take you through the entire process of getting your ArgoCD application authenticating with Keycloak.
These instructions will take you through the entire process of getting your Argo CD application to authenticate with Keycloak.
You will create a client within Keycloak and configure ArgoCD to use Keycloak for authentication, using groups set in Keycloak
Start by creating a client within Keycloak and configuring Argo CD to use Keycloak for authentication, using groups set in Keycloak
to determine privileges in Argo.
### Creating a new client in Keycloak
First we need to setup a new client.
First, set up a new client.
Start by logging into your keycloak server, select the realm you want to use (`master` by default)
and then go to __Clients__ and click the __Create client__ button at the top.
@@ -37,11 +37,11 @@ but it's not recommended in production).
Make sure to click __Save__.
There should be a tab called __Credentials__. You can copy the Client Secret that we'll use in our ArgoCD configuration.
There should be a tab called __Credentials__. You can copy the Client Secret that we'll use in our Argo CD configuration.
![Keycloak client secret](../../assets/keycloak-client-secret.png "Keycloak client secret")
### Configuring ArgoCD OIDC
### Configuring Argo CD OIDC
Let's start by storing the client secret you generated earlier in the argocd secret _argocd-secret_.
@@ -68,7 +68,7 @@ data:
clientID: argocd
clientSecret: $oidc.keycloak.clientSecret
refreshTokenThreshold: 2m
requestedScopes: ["openid", "profile", "email", "groups"]
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
```
Make sure that:
@@ -80,18 +80,18 @@ Make sure that:
- __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes
- __refreshTokenThreshold__ is less than the client token lifetime. If this setting is not less than the token lifetime, a new token will be obtained for every request. Keycloak sets the client token lifetime to 5 minutes by default.
## Keycloak and ArgoCD with PKCE
## Keycloak and Argo CD with PKCE
These instructions will take you through the entire process of getting your ArgoCD application authenticating with Keycloak.
These instructions will take you through the entire process of getting your Argo CD application to authenticate with Keycloak.
You will create a client within Keycloak and configure ArgoCD to use Keycloak for authentication, using groups set in Keycloak
You will create a client within Keycloak and configure Argo CD to use Keycloak for authentication, using groups set in Keycloak
to determine privileges in Argo.
You will also be able to authenticate using argo-cd command line.
### Creating a new client in Keycloak
First we need to setup a new client.
First, set up a new client.
Start by logging into your keycloak server, select the realm you want to use (`master` by default)
and then go to __Clients__ and click the __Create client__ button at the top.
@@ -119,7 +119,7 @@ Now go to a tab called __Advanced__, look for parameter named __Proof Key for Co
![Keycloak configure client Step 2](../../assets/keycloak-configure-client-pkce_2.png "Keycloak configure client Step 2")
Make sure to click __Save__.
### Configuring ArgoCD OIDC
### Configuring Argo CD OIDC
Now we can configure the config map and add the oidc configuration to enable our keycloak authentication.
You can use `$ kubectl edit configmap argocd-cm`.
@@ -138,7 +138,7 @@ data:
clientID: argocd
enablePKCEAuthentication: true
refreshTokenThreshold: 2m
requestedScopes: ["openid", "profile", "email", "groups"]
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
```
Make sure that:
@@ -146,13 +146,13 @@ Make sure that:
- __issuer__ ends with the correct realm (in this example _master_)
- __issuer__ on Keycloak releases older than version 17 the URL must include /auth (in this example /auth/realms/master)
- __clientID__ is set to the Client ID you configured in Keycloak
- __enablePKCEAuthentication__ must be set to true to enable correct ArgoCD behaviour with PKCE
- __enablePKCEAuthentication__ must be set to true to enable correct Argo CD behaviour with PKCE
- __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes
- __refreshTokenThreshold__ is less than the client token lifetime. If this setting is not less than the token lifetime, a new token will be obtained for every request. Keycloak sets the client token lifetime to 5 minutes by default.
## Configuring the groups claim
In order for ArgoCD to provide the groups the user is in we need to configure a groups claim that can be included in the authentication token.
In order for Argo CD to provide the groups the user is in we need to configure a groups claim that can be included in the authentication token.
To do this we'll start by creating a new __Client Scope__ called _groups_.
@@ -174,7 +174,7 @@ Go back to the client we've created earlier and go to the Tab "Client Scopes".
Click on "Add client scope", choose the _groups_ scope and add it either to the __Default__ or to the __Optional__ Client Scope.
If you put it in the Optional
category you will need to make sure that ArgoCD requests the scope in its OIDC configuration.
category you will need to make sure that Argo CD requests the scope in its OIDC configuration.
Since we will always want group information, I recommend
using the Default category.
@@ -184,7 +184,7 @@ Create a group called _ArgoCDAdmins_ and have your current user join the group.
![Keycloak user group](../../assets/keycloak-user-group.png "Keycloak user group")
## Configuring ArgoCD Policy
## Configuring Argo CD Policy
Now that we have an authentication that provides groups we want to apply a policy to these groups.
We can modify the _argocd-rbac-cm_ ConfigMap using `$ kubectl edit configmap argocd-rbac-cm`.
@@ -205,7 +205,7 @@ In this example we give the role _role:admin_ to all users in the group _ArgoCDA
You can now login using our new Keycloak OIDC authentication:
![Keycloak ArgoCD login](../../assets/keycloak-login.png "Keycloak ArgoCD login")
![Keycloak Argo CD login](../../assets/keycloak-login.png "Keycloak Argo CD login")
If you have used PKCE method, you can also authenticate using command line:
```bash
@@ -219,7 +219,7 @@ Once done, you should see
![Authentication successful!](../../assets/keycloak-authentication-successful.png "Authentication successful!")
## Troubleshoot
If ArgoCD auth returns 401 or when the login attempt leads to the loop, then restart the argocd-server pod.
If Argo CD auth returns 401, or the login attempt results in a redirect loop, restart the argocd-server pod.
```
kubectl rollout restart deployment argocd-server -n argocd
```

View File

@@ -220,7 +220,7 @@ func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCa
listRetryLimit: 1,
listRetryUseBackoff: false,
listRetryFunc: ListRetryFuncNever,
parentUIDToChildren: make(map[types.UID]map[kube.ResourceKey]struct{}),
parentUIDToChildren: make(map[types.UID][]kube.ResourceKey),
}
for i := range opts {
opts[i](cache)
@@ -280,11 +280,10 @@ type clusterCache struct {
respectRBAC int
// Parent-to-children index for O(1) child lookup during hierarchy traversal
// Maps any resource's UID to a set of its direct children's ResourceKeys
// Using a set eliminates O(k) duplicate checking on insertions
// Used for cross-namespace hierarchy traversal; namespaced traversal still builds a graph
parentUIDToChildren map[types.UID]map[kube.ResourceKey]struct{}
// Parent-to-children index for O(1) hierarchy traversal
// Maps any resource's UID to its direct children's ResourceKeys
// Eliminates need for O(n) graph building during hierarchy traversal
parentUIDToChildren map[types.UID][]kube.ResourceKey
}
type clusterCacheSync struct {
@@ -505,35 +504,27 @@ func (c *clusterCache) setNode(n *Resource) {
for k, v := range ns {
// update child resource owner references
if n.isInferredParentOf != nil && mightHaveInferredOwner(v) {
shouldBeParent := n.isInferredParentOf(k)
v.setOwnerRef(n.toOwnerRef(), shouldBeParent)
// Update index inline for inferred ref changes.
// Note: The removal case (shouldBeParent=false) is currently unreachable for
// StatefulSet→PVC relationships because Kubernetes makes volumeClaimTemplates
// immutable. We include it for defensive correctness and future-proofing.
if n.Ref.UID != "" {
if shouldBeParent {
c.addToParentUIDToChildren(n.Ref.UID, k)
} else {
c.removeFromParentUIDToChildren(n.Ref.UID, k)
}
}
v.setOwnerRef(n.toOwnerRef(), n.isInferredParentOf(k))
}
if mightHaveInferredOwner(n) && v.isInferredParentOf != nil {
childKey := n.ResourceKey()
shouldBeParent := v.isInferredParentOf(childKey)
n.setOwnerRef(v.toOwnerRef(), shouldBeParent)
// Update index inline for inferred ref changes.
// Note: The removal case (shouldBeParent=false) is currently unreachable for
// StatefulSet→PVC relationships because Kubernetes makes volumeClaimTemplates
// immutable. We include it for defensive correctness and future-proofing.
if v.Ref.UID != "" {
if shouldBeParent {
c.addToParentUIDToChildren(v.Ref.UID, childKey)
} else {
c.removeFromParentUIDToChildren(v.Ref.UID, childKey)
}
}
n.setOwnerRef(v.toOwnerRef(), v.isInferredParentOf(n.ResourceKey()))
}
}
}
}
// rebuildParentToChildrenIndex rebuilds the parent-to-children index after a full sync
// This is called after initial sync to ensure all parent-child relationships are tracked
func (c *clusterCache) rebuildParentToChildrenIndex() {
// Clear existing index
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
// Rebuild parent-to-children index from all resources with owner refs
for _, resource := range c.resources {
key := resource.ResourceKey()
for _, ownerRef := range resource.OwnerRefs {
if ownerRef.UID != "" {
c.addToParentUIDToChildren(ownerRef.UID, key)
}
}
}
@@ -541,29 +532,31 @@ func (c *clusterCache) setNode(n *Resource) {
// addToParentUIDToChildren adds a child to the parent-to-children index
func (c *clusterCache) addToParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
// Get or create the set for this parent
childrenSet := c.parentUIDToChildren[parentUID]
if childrenSet == nil {
childrenSet = make(map[kube.ResourceKey]struct{})
c.parentUIDToChildren[parentUID] = childrenSet
// Check if child is already in the list to avoid duplicates
children := c.parentUIDToChildren[parentUID]
for _, existing := range children {
if existing == childKey {
return // Already exists, no need to add
}
}
// Add child to set (O(1) operation, automatically handles duplicates)
childrenSet[childKey] = struct{}{}
c.parentUIDToChildren[parentUID] = append(children, childKey)
}
// removeFromParentUIDToChildren removes a child from the parent-to-children index
func (c *clusterCache) removeFromParentUIDToChildren(parentUID types.UID, childKey kube.ResourceKey) {
childrenSet := c.parentUIDToChildren[parentUID]
if childrenSet == nil {
return
}
children := c.parentUIDToChildren[parentUID]
for i, existing := range children {
if existing == childKey {
// Remove by swapping with last element and truncating
children[i] = children[len(children)-1]
c.parentUIDToChildren[parentUID] = children[:len(children)-1]
// Remove child from set (O(1) operation)
delete(childrenSet, childKey)
// Clean up empty sets to avoid memory leaks
if len(childrenSet) == 0 {
delete(c.parentUIDToChildren, parentUID)
// Clean up empty entries
if len(c.parentUIDToChildren[parentUID]) == 0 {
delete(c.parentUIDToChildren, parentUID)
}
return
}
}
}
@@ -1020,7 +1013,7 @@ func (c *clusterCache) sync() error {
c.apisMeta = make(map[schema.GroupKind]*apiMeta)
c.resources = make(map[kube.ResourceKey]*Resource)
c.namespacedResources = make(map[schema.GroupKind]bool)
c.parentUIDToChildren = make(map[types.UID]map[kube.ResourceKey]struct{})
c.parentUIDToChildren = make(map[types.UID][]kube.ResourceKey)
config := c.config
version, err := c.kubectl.GetServerVersion(config)
if err != nil {
@@ -1119,6 +1112,9 @@ func (c *clusterCache) sync() error {
return fmt.Errorf("failed to sync cluster %s: %w", c.config.Host, err)
}
// Rebuild orphaned children index after all resources are loaded
c.rebuildParentToChildrenIndex()
c.log.Info("Cluster successfully synced")
return nil
}
@@ -1259,8 +1255,8 @@ func (c *clusterCache) processCrossNamespaceChildren(
}
// Use parent-to-children index for O(1) lookup of direct children
childrenSet := c.parentUIDToChildren[clusterResource.Ref.UID]
for childKey := range childrenSet {
childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
for _, childKey := range childKeys {
child := c.resources[childKey]
if child == nil {
continue
@@ -1313,8 +1309,8 @@ func (c *clusterCache) iterateChildrenUsingIndex(
action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool,
) {
// Look up direct children of this parent using the index
childrenSet := c.parentUIDToChildren[parent.Ref.UID]
for childKey := range childrenSet {
childKeys := c.parentUIDToChildren[parent.Ref.UID]
for _, childKey := range childKeys {
if actionCallState[childKey] != notCalled {
continue // action() already called or in progress
}
@@ -1634,10 +1630,6 @@ func (c *clusterCache) onNodeRemoved(key kube.ResourceKey) {
for k, v := range ns {
if mightHaveInferredOwner(v) && existing.isInferredParentOf(k) {
v.setOwnerRef(existing.toOwnerRef(), false)
// Update index inline when removing inferred ref
if existing.Ref.UID != "" {
c.removeFromParentUIDToChildren(existing.Ref.UID, k)
}
}
}
}

View File

@@ -416,128 +416,6 @@ func TestStatefulSetOwnershipInferred(t *testing.T) {
}
}
// TestStatefulSetPVC_ParentToChildrenIndex verifies that inferred StatefulSet → PVC
// relationships are correctly captured in the parentUIDToChildren index during initial sync.
//
// The index is updated inline when inferred owner refs are added in setNode()
// (see the inferred parent handling section in clusterCache.setNode).
func TestStatefulSetPVC_ParentToChildrenIndex(t *testing.T) {
stsUID := types.UID("sts-uid-123")
// StatefulSet with volumeClaimTemplate named "data"
sts := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
ObjectMeta: metav1.ObjectMeta{UID: stsUID, Name: "web", Namespace: "default"},
Spec: appsv1.StatefulSetSpec{
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
ObjectMeta: metav1.ObjectMeta{Name: "data"},
}},
},
}
// PVCs that match the StatefulSet's volumeClaimTemplate pattern: <template>-<sts>-<ordinal>
// These have NO explicit owner references - the relationship is INFERRED
pvc0 := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
ObjectMeta: metav1.ObjectMeta{UID: "pvc-0-uid", Name: "data-web-0", Namespace: "default"},
}
pvc1 := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
ObjectMeta: metav1.ObjectMeta{UID: "pvc-1-uid", Name: "data-web-1", Namespace: "default"},
}
// Create cluster with all resources
// Must add PersistentVolumeClaim to API resources since it's not in the default set
cluster := newCluster(t, sts, pvc0, pvc1).WithAPIResources([]kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: kube.PersistentVolumeClaimKind},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
Meta: metav1.APIResource{Namespaced: true},
}})
err := cluster.EnsureSynced()
require.NoError(t, err)
// Verify the parentUIDToChildren index contains the inferred relationships
cluster.lock.RLock()
defer cluster.lock.RUnlock()
pvc0Key := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "data-web-0"}
pvc1Key := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "data-web-1"}
children, ok := cluster.parentUIDToChildren[stsUID]
require.True(t, ok, "StatefulSet should have entry in parentUIDToChildren index")
require.Contains(t, children, pvc0Key, "PVC data-web-0 should be in StatefulSet's children (inferred relationship)")
require.Contains(t, children, pvc1Key, "PVC data-web-1 should be in StatefulSet's children (inferred relationship)")
// Also verify the OwnerRefs were set correctly on the PVCs
pvc0Resource := cluster.resources[pvc0Key]
require.NotNil(t, pvc0Resource)
require.Len(t, pvc0Resource.OwnerRefs, 1, "PVC0 should have inferred owner ref")
require.Equal(t, stsUID, pvc0Resource.OwnerRefs[0].UID, "PVC0 owner should be the StatefulSet")
pvc1Resource := cluster.resources[pvc1Key]
require.NotNil(t, pvc1Resource)
require.Len(t, pvc1Resource.OwnerRefs, 1, "PVC1 should have inferred owner ref")
require.Equal(t, stsUID, pvc1Resource.OwnerRefs[0].UID, "PVC1 owner should be the StatefulSet")
}
// TestStatefulSetPVC_WatchEvent_IndexUpdated verifies that when a PVC is added
// via watch event (after initial sync), both the inferred owner reference AND
// the parentUIDToChildren index are updated correctly.
//
// This tests the inline index update logic in setNode() which updates the index
// immediately when inferred owner refs are added.
func TestStatefulSetPVC_WatchEvent_IndexUpdated(t *testing.T) {
stsUID := types.UID("sts-uid-456")
// StatefulSet with volumeClaimTemplate
sts := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: kube.StatefulSetKind},
ObjectMeta: metav1.ObjectMeta{UID: stsUID, Name: "db", Namespace: "default"},
Spec: appsv1.StatefulSetSpec{
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
ObjectMeta: metav1.ObjectMeta{Name: "storage"},
}},
},
}
// Create cluster with ONLY the StatefulSet - PVC will be added via watch event
cluster := newCluster(t, sts).WithAPIResources([]kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: kube.PersistentVolumeClaimKind},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
Meta: metav1.APIResource{Namespaced: true},
}})
err := cluster.EnsureSynced()
require.NoError(t, err)
// PVC that matches the StatefulSet's volumeClaimTemplate pattern
// Added via watch event AFTER initial sync
pvc := &corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kube.PersistentVolumeClaimKind},
ObjectMeta: metav1.ObjectMeta{UID: "pvc-watch-uid", Name: "storage-db-0", Namespace: "default"},
}
// Simulate watch event adding the PVC
cluster.lock.Lock()
cluster.setNode(cluster.newResource(mustToUnstructured(pvc)))
cluster.lock.Unlock()
cluster.lock.RLock()
defer cluster.lock.RUnlock()
pvcKey := kube.ResourceKey{Group: "", Kind: kube.PersistentVolumeClaimKind, Namespace: "default", Name: "storage-db-0"}
// Verify the OwnerRef IS correctly set
pvcResource := cluster.resources[pvcKey]
require.NotNil(t, pvcResource, "PVC should exist in cache")
require.Len(t, pvcResource.OwnerRefs, 1, "PVC should have inferred owner ref from StatefulSet")
require.Equal(t, stsUID, pvcResource.OwnerRefs[0].UID, "Owner should be the StatefulSet")
// Verify the index IS updated for inferred refs via watch events
children, indexUpdated := cluster.parentUIDToChildren[stsUID]
require.True(t, indexUpdated, "Index should be updated when inferred refs are added via watch events")
require.Contains(t, children, pvcKey, "PVC should be in StatefulSet's children (inferred relationship)")
}
func TestEnsureSyncedSingleNamespace(t *testing.T) {
obj1 := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -2420,226 +2298,3 @@ func TestIterateHierarchyV2_CircularOwnerChain_NoStackOverflow(t *testing.T) {
assert.Equal(t, 1, visitCount["resource-a"], "resource-a should be visited exactly once")
assert.Equal(t, 1, visitCount["resource-b"], "resource-b should be visited exactly once")
}
// BenchmarkSync_ParentToChildrenIndex measures the overhead of parent-to-children index
// operations during sync. This benchmark was created to investigate performance regression
// reported in https://github.com/argoproj/argo-cd/issues/26863
//
// The index is now maintained with O(1) operations (set-based) and updated inline
// in setNode() for both explicit and inferred owner refs. No rebuild is needed.
//
// This benchmark measures sync performance with resources that have owner references
// to quantify the index-building overhead at different scales.
func BenchmarkSync_ParentToChildrenIndex(b *testing.B) {
testCases := []struct {
name string
totalResources int
pctWithOwnerRefs int // Percentage of resources with owner references
}{
// Baseline: no owner refs (index operations are no-ops)
{"1000res_0pctOwnerRefs", 1000, 0},
{"5000res_0pctOwnerRefs", 5000, 0},
{"10000res_0pctOwnerRefs", 10000, 0},
// Typical case: ~80% of resources have owner refs (pods owned by RS, RS owned by Deployment)
{"1000res_80pctOwnerRefs", 1000, 80},
{"5000res_80pctOwnerRefs", 5000, 80},
{"10000res_80pctOwnerRefs", 10000, 80},
// Heavy case: all resources have owner refs
{"1000res_100pctOwnerRefs", 1000, 100},
{"5000res_100pctOwnerRefs", 5000, 100},
{"10000res_100pctOwnerRefs", 10000, 100},
// Stress test: larger scale
{"20000res_80pctOwnerRefs", 20000, 80},
}
for _, tc := range testCases {
b.Run(tc.name, func(b *testing.B) {
resources := make([]runtime.Object, 0, tc.totalResources)
// Create parent resources (deployments) - these won't have owner refs
numParents := tc.totalResources / 10 // 10% are parents
if numParents < 1 {
numParents = 1
}
parentUIDs := make([]types.UID, numParents)
for i := 0; i < numParents; i++ {
uid := types.UID(fmt.Sprintf("deploy-uid-%d", i))
parentUIDs[i] = uid
resources = append(resources, &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("deploy-%d", i),
Namespace: "default",
UID: uid,
},
})
}
// Create child resources (pods) - some with owner refs
numChildren := tc.totalResources - numParents
numWithOwnerRefs := (numChildren * tc.pctWithOwnerRefs) / 100
for i := 0; i < numChildren; i++ {
pod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%d", i),
Namespace: "default",
UID: types.UID(fmt.Sprintf("pod-uid-%d", i)),
},
}
// Add owner refs to the first numWithOwnerRefs pods
if i < numWithOwnerRefs {
parentIdx := i % numParents
pod.OwnerReferences = []metav1.OwnerReference{{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: fmt.Sprintf("deploy-%d", parentIdx),
UID: parentUIDs[parentIdx],
}}
}
resources = append(resources, pod)
}
cluster := newCluster(b, resources...)
b.ResetTimer()
b.ReportAllocs()
for n := 0; n < b.N; n++ {
// sync() reinitializes resources, parentUIDToChildren, etc. at the start,
// so no manual reset is needed here.
err := cluster.sync()
if err != nil {
b.Fatal(err)
}
}
})
}
}
// BenchmarkUpdateParentUIDToChildren measures the cost of incremental index updates
// during setNode. This is called for EVERY resource during sync. The index uses
// set-based storage so add/remove operations are O(1) regardless of children count.
func BenchmarkUpdateParentUIDToChildren(b *testing.B) {
testCases := []struct {
name string
childrenPerParent int
}{
{"10children", 10},
{"50children", 50},
{"100children", 100},
{"500children", 500},
{"1000children", 1000},
}
for _, tc := range testCases {
b.Run(tc.name, func(b *testing.B) {
cluster := newCluster(b)
err := cluster.EnsureSynced()
if err != nil {
b.Fatal(err)
}
parentUID := types.UID("parent-uid")
// Pre-populate with existing children
childrenSet := make(map[kube.ResourceKey]struct{})
for i := 0; i < tc.childrenPerParent; i++ {
childKey := kube.ResourceKey{
Group: "",
Kind: "Pod",
Namespace: "default",
Name: fmt.Sprintf("existing-child-%d", i),
}
childrenSet[childKey] = struct{}{}
}
cluster.parentUIDToChildren[parentUID] = childrenSet
// Create a new child key to add
newChildKey := kube.ResourceKey{
Group: "",
Kind: "Pod",
Namespace: "default",
Name: "new-child",
}
b.ResetTimer()
b.ReportAllocs()
for n := 0; n < b.N; n++ {
// Simulate adding a new child - O(1) set insertion
cluster.addToParentUIDToChildren(parentUID, newChildKey)
// Remove it so we can add it again in the next iteration
cluster.removeFromParentUIDToChildren(parentUID, newChildKey)
}
})
}
}
// BenchmarkIncrementalIndexBuild measures the cost of incremental index updates
// via addToParentUIDToChildren during sync. The index uses O(1) set-based operations.
//
// This benchmark was created to investigate issue #26863 and verify the fix.
func BenchmarkIncrementalIndexBuild(b *testing.B) {
testCases := []struct {
name string
numParents int
childrenPerParent int
}{
{"100parents_10children", 100, 10},
{"100parents_50children", 100, 50},
{"100parents_100children", 100, 100},
{"1000parents_10children", 1000, 10},
{"1000parents_100children", 1000, 100},
}
for _, tc := range testCases {
// Benchmark incremental approach (what happens during setNode)
b.Run(tc.name+"_incremental", func(b *testing.B) {
cluster := newCluster(b)
err := cluster.EnsureSynced()
if err != nil {
b.Fatal(err)
}
// Prepare parent UIDs and child keys
type childInfo struct {
parentUID types.UID
childKey kube.ResourceKey
}
children := make([]childInfo, 0, tc.numParents*tc.childrenPerParent)
for p := 0; p < tc.numParents; p++ {
parentUID := types.UID(fmt.Sprintf("parent-%d", p))
for c := 0; c < tc.childrenPerParent; c++ {
children = append(children, childInfo{
parentUID: parentUID,
childKey: kube.ResourceKey{
Kind: "Pod",
Namespace: "default",
Name: fmt.Sprintf("child-%d-%d", p, c),
},
})
}
}
b.ResetTimer()
b.ReportAllocs()
for n := 0; n < b.N; n++ {
// Clear the index
cluster.parentUIDToChildren = make(map[types.UID]map[kube.ResourceKey]struct{})
// Simulate incremental adds (O(1) set insertions)
for _, child := range children {
cluster.addToParentUIDToChildren(child.parentUID, child.childKey)
}
}
})
}
}

2
go.mod
View File

@@ -112,7 +112,7 @@ require (
k8s.io/apimachinery v0.34.0
k8s.io/client-go v0.34.0
k8s.io/code-generator v0.34.0
k8s.io/klog/v2 v2.140.0
k8s.io/klog/v2 v2.130.1
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
k8s.io/kubectl v0.34.0
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect

3
go.sum
View File

@@ -1479,9 +1479,8 @@ k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJez
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc=
k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0=
k8s.io/kube-aggregator v0.34.0 h1:XE4u+HOYkj0g44sblhTtPv+QyIIK7sJxrIlia0731kE=
k8s.io/kube-aggregator v0.34.0/go.mod h1:GIUqdChXVC448Vp2Wgxf0m6fir7Xt3A2TAZcs2JNG1Y=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=

View File

@@ -17,7 +17,7 @@ import {
import {AppsListPreferences, AppSetsListPreferences, services} from '../../../shared/services';
import {Filter, FiltersGroup} from '../filter/filter';
import {createMetadataSelector} from '../selectors';
import {ComparisonStatusIcon, getAppAllSources, getAppSetHealthStatus, HealthStatusIcon, getOperationStateTitle} from '../utils';
import {ComparisonStatusIcon, getAppSetHealthStatus, HealthStatusIcon, getOperationStateTitle} from '../utils';
import {formatClusterQueryParam} from '../../../shared/utils';
import {COLORS} from '../../../shared/components/colors';
@@ -27,7 +27,6 @@ export interface FilterResult {
health: boolean;
clusters: boolean;
namespaces: boolean;
targetRevision: boolean;
operation: boolean;
annotations: boolean;
favourite: boolean;
@@ -60,41 +59,31 @@ export function getAppFilterResults(applications: Application[], pref: AppsListP
const labelSelector = createMetadataSelector(pref.labelsFilter || []);
const annotationSelector = createMetadataSelector(pref.annotationsFilter || []);
return applications.map(app => {
const targetRevisions = getAppAllSources(app)
.map(source => source.targetRevision)
.filter((item): item is string => !!item);
return {
...app,
filterResult: {
sync: pref.syncFilter.length === 0 || pref.syncFilter.includes(app.status.sync.status),
autosync: pref.autoSyncFilter.length === 0 || pref.autoSyncFilter.includes(getAutoSyncStatus(app.spec.syncPolicy)),
health: pref.healthFilter.length === 0 || pref.healthFilter.includes(app.status.health.status),
namespaces: pref.namespacesFilter.length === 0 || pref.namespacesFilter.some(ns => app.spec.destination.namespace && minimatch(app.spec.destination.namespace, ns)),
favourite: !pref.showFavorites || (pref.favoritesAppList && pref.favoritesAppList.includes(app.metadata.name)),
clusters:
pref.clustersFilter.length === 0 ||
pref.clustersFilter.some(filterString => {
const match = filterString.match('^(.*) [(](http.*)[)]$');
if (match?.length === 3) {
const [, name, url] = match;
return url === app.spec.destination.server || name === app.spec.destination.name;
} else {
const inputMatch = filterString.match('^http.*$');
return (
(inputMatch && inputMatch[0] === app.spec.destination.server) || (app.spec.destination.name && minimatch(app.spec.destination.name, filterString))
);
}
}),
targetRevision:
pref.targetRevisionFilter.length === 0 || pref.targetRevisionFilter.some(filter => targetRevisions.some(targetRevision => minimatch(targetRevision, filter))),
labels: pref.labelsFilter.length === 0 || labelSelector(app.metadata.labels),
annotations: pref.annotationsFilter.length === 0 || annotationSelector(app.metadata.annotations),
operation: pref.operationFilter.length === 0 || pref.operationFilter.includes(getOperationStateTitle(app))
}
};
});
return applications.map(app => ({
...app,
filterResult: {
sync: pref.syncFilter.length === 0 || pref.syncFilter.includes(app.status.sync.status),
autosync: pref.autoSyncFilter.length === 0 || pref.autoSyncFilter.includes(getAutoSyncStatus(app.spec.syncPolicy)),
health: pref.healthFilter.length === 0 || pref.healthFilter.includes(app.status.health.status),
namespaces: pref.namespacesFilter.length === 0 || pref.namespacesFilter.some(ns => app.spec.destination.namespace && minimatch(app.spec.destination.namespace, ns)),
favourite: !pref.showFavorites || (pref.favoritesAppList && pref.favoritesAppList.includes(app.metadata.name)),
clusters:
pref.clustersFilter.length === 0 ||
pref.clustersFilter.some(filterString => {
const match = filterString.match('^(.*) [(](http.*)[)]$');
if (match?.length === 3) {
const [, name, url] = match;
return url === app.spec.destination.server || name === app.spec.destination.name;
} else {
const inputMatch = filterString.match('^http.*$');
return (inputMatch && inputMatch[0] === app.spec.destination.server) || (app.spec.destination.name && minimatch(app.spec.destination.name, filterString));
}
}),
labels: pref.labelsFilter.length === 0 || labelSelector(app.metadata.labels),
annotations: pref.annotationsFilter.length === 0 || annotationSelector(app.metadata.annotations),
operation: pref.operationFilter.length === 0 || pref.operationFilter.includes(getOperationStateTitle(app))
}
}));
}
export function getAppSetFilterResults(appSets: ApplicationSet[], pref: AppSetsListPreferences): ApplicationSetFilteredApp[] {
@@ -372,26 +361,6 @@ const NamespaceFilter = React.memo((props: AppFilterProps) => {
);
});
// Filter widget for the "TARGET REVISION" facet of the applications list.
// Collects every distinct, non-empty targetRevision across all sources of the
// visible apps and renders them as selectable options in the shared <Filter>.
const TargetRevisionFilter = (props: AppFilterProps) => {
    const options = React.useMemo(() => {
        // Deduplicate while preserving first-seen order (Set keeps insertion order).
        const revisions = new Set<string>();
        for (const app of props.apps) {
            for (const source of getAppAllSources(app)) {
                if (source.targetRevision) {
                    revisions.add(source.targetRevision);
                }
            }
        }
        return optionsFrom(Array.from(revisions), props.pref.targetRevisionFilter);
    }, [props.apps, props.pref.targetRevisionFilter]);
    return (
        <Filter
            label='TARGET REVISION'
            selected={props.pref.targetRevisionFilter}
            setSelected={selected => props.onChange({...props.pref, targetRevisionFilter: selected})}
            field={true}
            options={options}
        />
    );
};
const FavoriteFilter = (props: {value: boolean; onChange: (showFavorites: boolean) => void}) => {
const onChange = (val: boolean) => {
props.onChange(val);
@@ -499,11 +468,9 @@ export const ApplicationsFilter = (props: AppFilterProps) => {
...(props.pref.healthFilter || []),
...(props.pref.operationFilter || []),
...(props.pref.labelsFilter || []),
...(props.pref.annotationsFilter || []),
...(props.pref.projectsFilter || []),
...(props.pref.clustersFilter || []),
...(props.pref.namespacesFilter || []),
...(props.pref.targetRevisionFilter || []),
...(props.pref.autoSyncFilter || []),
...(props.pref.showFavorites ? ['favorites'] : [])
];
@@ -525,7 +492,6 @@ export const ApplicationsFilter = (props: AppFilterProps) => {
<ProjectFilter {...props} />
<ClusterFilter {...props} />
<NamespaceFilter {...props} />
<TargetRevisionFilter {...props} />
<AutoSyncFilter {...props} collapsed={true} />
</FiltersGroup>
);

View File

@@ -160,13 +160,6 @@ const ViewPref = ({children}: {children: (pref: AppsListPreferences & {page: num
.split(',')
.filter(item => !!item);
}
if (params.get('targetRevision') != null) {
viewPref.targetRevisionFilter = params
.get('targetRevision')
.split(',')
.map(decodeURIComponent)
.filter(item => !!item);
}
if (params.get('cluster') != null) {
viewPref.clustersFilter = params
.get('cluster')
@@ -480,7 +473,6 @@ export const ApplicationsList = (props: RouteComponentProps<any> & {objectListKi
autoSync: newPref.autoSyncFilter.join(','),
health: newPref.healthFilter.join(','),
namespace: newPref.namespacesFilter.join(','),
targetRevision: newPref.targetRevisionFilter.map(encodeURIComponent).join(','),
cluster: newPref.clustersFilter.join(','),
labels: newPref.labelsFilter.map(encodeURIComponent).join(','),
annotations: newPref.annotationsFilter.map(encodeURIComponent).join(','),

View File

@@ -1479,34 +1479,6 @@ export function getAppDrySource(app?: appModels.Application): appModels.Applicat
return {repoURL, targetRevision, path};
}
// getAppAllSources gets all app sources as an array. For single source apps, returns [source].
// For multi-source apps, returns the sources array. For sourceHydrator apps, returns a single synthesized source.
export function getAppAllSources(app?: appModels.Application): appModels.ApplicationSource[] {
    if (!app) {
        return [];
    }
    const {sourceHydrator, sources, source} = app.spec;
    if (sourceHydrator) {
        // Synthesize one source from the hydrator's dry repo plus the sync branch/path.
        const hydrated = {
            repoURL: sourceHydrator.drySource.repoURL,
            targetRevision: sourceHydrator.syncSource.targetBranch,
            path: sourceHydrator.syncSource.path
        } as appModels.ApplicationSource;
        return [hydrated];
    }
    if (sources?.length) {
        return sources;
    }
    return source ? [source] : [];
}
// getAppDefaultSyncRevision gets the first app revisions from `status.sync.revisions` or, if that list is missing or empty, the `revision`
// field.
export function getAppDefaultSyncRevision(app?: appModels.Application) {

View File

@@ -91,7 +91,6 @@ export class AppsListPreferences extends AbstractAppsListPreferences {
pref.clustersFilter = [];
pref.namespacesFilter = [];
pref.targetRevisionFilter = [];
pref.projectsFilter = [];
pref.syncFilter = [];
pref.autoSyncFilter = [];
@@ -103,7 +102,6 @@ export class AppsListPreferences extends AbstractAppsListPreferences {
public autoSyncFilter: string[];
public namespacesFilter: string[];
public clustersFilter: string[];
public targetRevisionFilter: string[];
public operationFilter: string[];
}
@@ -158,7 +156,6 @@ const DEFAULT_PREFERENCES: ViewPreferences = {
annotationsFilter: new Array<string>(),
projectsFilter: new Array<string>(),
namespacesFilter: new Array<string>(),
targetRevisionFilter: new Array<string>(),
clustersFilter: new Array<string>(),
syncFilter: new Array<string>(),
autoSyncFilter: new Array<string>(),
@@ -231,7 +228,6 @@ export class ViewPreferencesService {
appList.annotationsFilter = appList.annotationsFilter || [];
appList.projectsFilter = appList.projectsFilter || [];
appList.namespacesFilter = appList.namespacesFilter || [];
appList.targetRevisionFilter = appList.targetRevisionFilter || [];
appList.clustersFilter = appList.clustersFilter || [];
appList.syncFilter = appList.syncFilter || [];
appList.autoSyncFilter = appList.autoSyncFilter || [];

View File

@@ -9705,9 +9705,9 @@ yaml-ast-parser@0.0.43:
integrity sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==
yaml@^1.10.0:
version "1.10.3"
resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.3.tgz#76e407ed95c42684fb8e14641e5de62fe65bbcb3"
integrity sha512-vIYeF1u3CjlhAFekPPAk2h/Kv4T3mAkMox5OymRiJQB0spDP10LHvt+K7G9Ny6NuuMAb25/6n1qyUjAcGNf/AA==
version "1.10.2"
resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b"
integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==
yargs-parser@^20.2.2:
version "20.2.9"

View File

@@ -207,9 +207,6 @@ func SetLogLevel(logLevel string) {
// SetGLogLevel sets the glog verbosity level (-v) for the klog-based k8s go-client logging.
func SetGLogLevel(glogLevel int) {
	// Register klog's flags on the default flag set so they can be configured below.
	klog.InitFlags(nil)
	// Opt into fixed stderrthreshold behavior (kubernetes/klog#212).
	// NOTE(review): order looks significant — this flag presumably must be set before
	// stderrthreshold for the fixed semantics to apply; confirm against klog docs.
	_ = flag.Set("legacy_stderr_threshold_behavior", "false")
	// Errors are deliberately ignored: these flags are registered by klog.InitFlags above,
	// so Set can only fail on a malformed value, and all values here are constants.
	_ = flag.Set("stderrthreshold", "INFO")
	_ = flag.Set("logtostderr", "true")
	_ = flag.Set("v", strconv.Itoa(glogLevel))
}

View File

@@ -685,11 +685,10 @@ func DiscoverGitHubAppInstallationID(ctx context.Context, appId int64, privateKe
opts.Page = resp.NextPage
}
// Cache each installation under its account's key so multiple orgs do not overwrite each other.
// Cache all installation IDs
for _, installation := range allInstallations {
if installation.Account != nil && installation.Account.Login != nil && installation.ID != nil {
instKey := fmt.Sprintf("%s:%s:%d", strings.ToLower(*installation.Account.Login), domain, appId)
githubInstallationIdCache.Set(instKey, *installation.ID, gocache.DefaultExpiration)
githubInstallationIdCache.Set(cacheKey, *installation.ID, gocache.DefaultExpiration)
}
}

View File

@@ -600,35 +600,6 @@ func TestDiscoverGitHubAppInstallationID(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, int64(98765), actualId)
})
t.Run("returns correct installation ID when app is installed on multiple orgs", func(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasSuffix(r.URL.Path, "/app/installations") {
w.WriteHeader(http.StatusOK)
//nolint:errcheck
json.NewEncoder(w).Encode([]map[string]any{
{"id": 11111, "account": map[string]any{"login": "org-alpha"}},
{"id": 22222, "account": map[string]any{"login": "target-org"}},
{"id": 33333, "account": map[string]any{"login": "org-gamma"}},
})
return
}
w.WriteHeader(http.StatusNotFound)
}))
defer server.Close()
t.Cleanup(func() {
domain, _ := domainFromBaseURL(server.URL)
for _, org := range []string{"org-alpha", "target-org", "org-gamma"} {
githubInstallationIdCache.Delete(fmt.Sprintf("%s:%s:%d", org, domain, 12345))
}
})
ctx := context.Background()
actualId, err := DiscoverGitHubAppInstallationID(ctx, 12345, fakeGitHubAppPrivateKey, server.URL, "target-org")
require.NoError(t, err)
assert.Equal(t, int64(22222), actualId, "should return the installation ID for the requested org, not the last one in the list")
})
}
func TestExtractOrgFromRepoURL(t *testing.T) {