feat(controller): Annotation to pause reconciliation for a specific cluster (#26442)

Signed-off-by: Drew Bailey <drew.bailey@airbnb.com>
Co-authored-by: Drew Bailey <drew.bailey@airbnb.com>
This commit is contained in:
Drew Bailey
2026-02-18 20:30:39 -05:00
committed by GitHub
parent 7acd9305df
commit ed6d2c525e
6 changed files with 116 additions and 1 deletions

View File

@@ -2,10 +2,12 @@ package sharding
import (
	"maps"
	"strconv"
	"sync"
	log "github.com/sirupsen/logrus"
	"github.com/argoproj/argo-cd/v3/common"
	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v3/util/db"
)
@@ -61,6 +63,10 @@ func (sharding *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
	if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
		return true
	}
	if skipReconcile, err := strconv.ParseBool(c.Annotations[common.AnnotationKeyAppSkipReconcile]); err == nil && skipReconcile {
		log.Debugf("Cluster %s has %s annotation set, skipping", c.Server, common.AnnotationKeyAppSkipReconcile)
		return false
	}
	clusterShard := 0
	if shard, ok := sharding.Shards[c.Server]; ok {
		clusterShard = shard

View File

@@ -5,6 +5,7 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/argoproj/argo-cd/v3/common"
	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
	dbmocks "github.com/argoproj/argo-cd/v3/util/db/mocks"
)
@@ -322,6 +323,28 @@ func TestClusterSharding_IsManagedCluster(t *testing.T) {
	}))
}
// TestIsManagedCluster_SkipReconcileAnnotation verifies that a cluster whose
// secret carries the skip-reconcile annotation set to "true" is treated as
// unmanaged, while "false" (or no annotation) leaves it managed, and that the
// nil (in-cluster) case is always managed.
func TestIsManagedCluster_SkipReconcileAnnotation(t *testing.T) {
	sharding := setupTestSharding(0, 1)
	sharding.Init(
		&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{{ID: "1", Server: "https://cluster1"}}},
		&v1alpha1.ApplicationList{},
	)

	// A nil cluster (in-cluster) is always managed by the current shard.
	assert.True(t, sharding.IsManagedCluster(nil))

	cases := []struct {
		annotations map[string]string
		managed     bool
	}{
		{nil, true},
		{map[string]string{common.AnnotationKeyAppSkipReconcile: "true"}, false},
		{map[string]string{common.AnnotationKeyAppSkipReconcile: "false"}, true},
	}
	for _, tc := range cases {
		got := sharding.IsManagedCluster(&v1alpha1.Cluster{
			Server:      "https://cluster1",
			Annotations: tc.annotations,
		})
		assert.Equal(t, tc.managed, got)
	}
}
func TestClusterSharding_ClusterShardOfResourceShouldNotBeChanged(t *testing.T) {
	shard := 1
	replicas := 2

View File

@@ -13,6 +13,23 @@ If you're unsure about the context names, run `kubectl config get-contexts` to g
This will connect to the cluster and install the necessary resources for ArgoCD to connect to it.
Note that you will need privileged access to the cluster.
## Skipping cluster reconciliation
You can stop the controller from reconciling a cluster without removing it by annotating its secret:
```bash
kubectl -n argocd annotate secret <cluster-secret-name> argocd.argoproj.io/skip-reconcile=true
```
The cluster will still appear in `argocd cluster list` but the controller will skip reconciliation
for all apps targeting it. To resume, remove the annotation:
```bash
kubectl -n argocd annotate secret <cluster-secret-name> argocd.argoproj.io/skip-reconcile-
```
See [Declarative Setup - Skipping Cluster Reconciliation](./declarative-setup.md#skipping-cluster-reconciliation) for details.
## Removing a cluster
Run `argocd cluster rm context-name`.

View File

@@ -595,6 +595,49 @@ stringData:
}
```
### Skipping Cluster Reconciliation
You can prevent the application controller from reconciling all apps targeting a cluster by annotating its
secret with `argocd.argoproj.io/skip-reconcile: "true"`. This uses the same annotation as
[Skip Application Reconcile](../user-guide/skip_reconcile.md), but applied at the cluster level.
The cluster remains visible in API responses (`argocd cluster list`), but the controller treats it as unmanaged.
```yaml
apiVersion: v1
kind: Secret
metadata:
name: mycluster-secret
labels:
argocd.argoproj.io/secret-type: cluster
annotations:
argocd.argoproj.io/skip-reconcile: "true"
type: Opaque
stringData:
name: mycluster.example.com
server: https://mycluster.example.com
config: |
{
"bearerToken": "<authentication token>",
"tlsClientConfig": {
"insecure": false,
"caData": "<base64 encoded certificate>"
}
}
```
To skip an existing cluster:
```bash
kubectl -n argocd annotate secret mycluster-secret argocd.argoproj.io/skip-reconcile=true
```
To resume reconciliation:
```bash
kubectl -n argocd annotate secret mycluster-secret argocd.argoproj.io/skip-reconcile-
```
### EKS
EKS cluster secret example using argocd-k8s-auth and [IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) and [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html):

View File

@@ -11,7 +11,7 @@
| argocd.argoproj.io/manifest-generate-paths | Application | [see scaling docs](../operator-manual/high_availability.md#manifest-paths-annotation) | Used to avoid unnecessary Application refreshes, especially in mono-repos. |
| argocd.argoproj.io/managed-by-url | Application | A valid http(s) URL | Specifies the URL of the Argo CD instance managing the application. Used to correctly link to applications managed by a different Argo CD instance. See [managed-by-url docs](../operator-manual/managed-by-url.md) for details. |
| argocd.argoproj.io/refresh | Application | `normal`, `hard` | Indicates that app needs to be refreshed. Removed by application controller after app is refreshed. Value `"hard"` means manifest cache and target cluster state cache should be invalidated before refresh. |
| argocd.argoproj.io/skip-reconcile | Application | `"true"` | Indicates to the Argo CD application controller that the Application should not be reconciled. See the [skip reconcile documentation](skip_reconcile.md) for use cases. | | argocd.argoproj.io/skip-reconcile | Application, Cluster Secret | `"true"` | On an Application, skips reconciliation for that app. On a cluster secret, skips reconciliation for all apps targeting that cluster. See [skip reconcile docs](skip_reconcile.md). |
| argocd.argoproj.io/sync-options | any | [see sync options docs](sync-options.md) | Provides a variety of settings to determine how an Application's resources are synced. |
| argocd.argoproj.io/sync-wave | any | [see sync waves docs](sync-waves.md) | |
| argocd.argoproj.io/tracking-id | any | any | Used by Argo CD to track resources it manages. See [resource tracking docs](resource_tracking.md) for details. |

View File

@@ -217,6 +217,32 @@ func TestClusterURLInRestAPI(t *testing.T) {
	assert.Equal(t, map[string]string{"test": "val"}, cluster.Labels)
}
// TestClusterSkipReconcileAnnotation exercises the cluster REST API round trip
// for the skip-reconcile annotation: set it via PUT, confirm it is persisted
// and returned by GET, then clear the annotations again.
func TestClusterSkipReconcileAnnotation(t *testing.T) {
	fixture.EnsureCleanState(t)

	clusterURL := url.QueryEscape(KubernetesInternalAPIServerAddr)
	updateEndpoint := fmt.Sprintf("/api/v1/clusters/%s?updatedFields=annotations", clusterURL)

	// Annotate the in-cluster secret via the update endpoint.
	var updated Cluster
	setBody := []byte(`{"annotations":{"argocd.argoproj.io/skip-reconcile":"true"}}`)
	err := fixture.DoHttpJsonRequest("PUT", updateEndpoint, &updated, setBody...)
	require.NoError(t, err)
	assert.Equal(t, "true", updated.Annotations["argocd.argoproj.io/skip-reconcile"])

	// The annotation must survive a round trip through GET.
	var fetched Cluster
	err = fixture.DoHttpJsonRequest("GET", "/api/v1/clusters/"+clusterURL, &fetched)
	require.NoError(t, err)
	assert.Equal(t, "in-cluster", fetched.Name)
	assert.Equal(t, "true", fetched.Annotations["argocd.argoproj.io/skip-reconcile"])

	// Reset the annotations so the cluster is left clean for later tests.
	err = fixture.DoHttpJsonRequest("PUT", updateEndpoint, &updated, []byte(`{"annotations":{}}`)...)
	require.NoError(t, err)
}
func TestClusterDeleteDenied(t *testing.T) {
	ctx := accountFixture.Given(t)
	ctx.Name("test").