Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 01:28:45 +01:00)

Compare commits: 7acd9305df ... 9851b84f97

4 Commits:

- 9851b84f97
- f2fdc65386
- 7180deb937
- ed6d2c525e

@@ -2,10 +2,12 @@ package sharding

import (
    "maps"
    "strconv"
    "sync"

    log "github.com/sirupsen/logrus"

    "github.com/argoproj/argo-cd/v3/common"
    "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    "github.com/argoproj/argo-cd/v3/util/db"
)

@@ -61,6 +63,10 @@ func (sharding *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
    if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
        return true
    }
    if skipReconcile, err := strconv.ParseBool(c.Annotations[common.AnnotationKeyAppSkipReconcile]); err == nil && skipReconcile {
        log.Debugf("Cluster %s has %s annotation set, skipping", c.Server, common.AnnotationKeyAppSkipReconcile)
        return false
    }
    clusterShard := 0
    if shard, ok := sharding.Shards[c.Server]; ok {
        clusterShard = shard

@@ -5,6 +5,7 @@ import (

    "github.com/stretchr/testify/assert"

    "github.com/argoproj/argo-cd/v3/common"
    "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    dbmocks "github.com/argoproj/argo-cd/v3/util/db/mocks"
)

@@ -322,6 +323,28 @@ func TestClusterSharding_IsManagedCluster(t *testing.T) {
    }))
}

func TestIsManagedCluster_SkipReconcileAnnotation(t *testing.T) {
    sharding := setupTestSharding(0, 1)
    sharding.Init(
        &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{{ID: "1", Server: "https://cluster1"}}},
        &v1alpha1.ApplicationList{},
    )

    assert.True(t, sharding.IsManagedCluster(&v1alpha1.Cluster{Server: "https://cluster1"}))

    assert.False(t, sharding.IsManagedCluster(&v1alpha1.Cluster{
        Server:      "https://cluster1",
        Annotations: map[string]string{common.AnnotationKeyAppSkipReconcile: "true"},
    }))

    assert.True(t, sharding.IsManagedCluster(&v1alpha1.Cluster{
        Server:      "https://cluster1",
        Annotations: map[string]string{common.AnnotationKeyAppSkipReconcile: "false"},
    }))

    assert.True(t, sharding.IsManagedCluster(nil))
}

func TestClusterSharding_ClusterShardOfResourceShouldNotBeChanged(t *testing.T) {
    shard := 1
    replicas := 2

@@ -13,6 +13,23 @@ If you're unsure about the context names, run `kubectl config get-contexts` to g
This will connect to the cluster and install the necessary resources for Argo CD to connect to it.
Note that you will need privileged access to the cluster.

## Skipping cluster reconciliation

You can stop the controller from reconciling a cluster without removing it by annotating its secret:

```bash
kubectl -n argocd annotate secret <cluster-secret-name> argocd.argoproj.io/skip-reconcile=true
```

The cluster will still appear in `argocd cluster list`, but the controller will skip reconciliation
for all apps targeting it. To resume, remove the annotation:

```bash
kubectl -n argocd annotate secret <cluster-secret-name> argocd.argoproj.io/skip-reconcile-
```

See [Declarative Setup - Skipping Cluster Reconciliation](./declarative-setup.md#skipping-cluster-reconciliation) for details.
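
To check whether a given cluster secret is currently flagged, you can read the annotation back. This is a minimal verification sketch; the secret name is a placeholder and the JSONPath escaping follows standard kubectl conventions:

```bash
# Prints "true" while reconciliation is skipped; prints nothing once the annotation is removed
kubectl -n argocd get secret <cluster-secret-name> \
  -o jsonpath='{.metadata.annotations.argocd\.argoproj\.io/skip-reconcile}'
```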

## Removing a cluster

Run `argocd cluster rm context-name`.

@@ -595,6 +595,49 @@ stringData:
    }
```

### Skipping Cluster Reconciliation

You can prevent the application controller from reconciling all apps targeting a cluster by annotating its
secret with `argocd.argoproj.io/skip-reconcile: "true"`. This uses the same annotation as
[Skip Application Reconcile](../user-guide/skip_reconcile.md), but applied at the cluster level.

The cluster remains visible in API responses (`argocd cluster list`), but the controller treats it as unmanaged.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: mycluster-secret
  labels:
    argocd.argoproj.io/secret-type: cluster
  annotations:
    argocd.argoproj.io/skip-reconcile: "true"
type: Opaque
stringData:
  name: mycluster.example.com
  server: https://mycluster.example.com
  config: |
    {
      "bearerToken": "<authentication token>",
      "tlsClientConfig": {
        "insecure": false,
        "caData": "<base64 encoded certificate>"
      }
    }
```

To skip an existing cluster:

```bash
kubectl -n argocd annotate secret mycluster-secret argocd.argoproj.io/skip-reconcile=true
```

To resume reconciliation:

```bash
kubectl -n argocd annotate secret mycluster-secret argocd.argoproj.io/skip-reconcile-
```

### EKS

EKS cluster secret example using argocd-k8s-auth and [IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) and [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html):

@@ -11,7 +11,7 @@
| argocd.argoproj.io/manifest-generate-paths | Application | [see scaling docs](../operator-manual/high_availability.md#manifest-paths-annotation) | Used to avoid unnecessary Application refreshes, especially in mono-repos. |
| argocd.argoproj.io/managed-by-url | Application | A valid http(s) URL | Specifies the URL of the Argo CD instance managing the application. Used to correctly link to applications managed by a different Argo CD instance. See [managed-by-url docs](../operator-manual/managed-by-url.md) for details. |
| argocd.argoproj.io/refresh | Application | `normal`, `hard` | Indicates that app needs to be refreshed. Removed by application controller after app is refreshed. Value `"hard"` means manifest cache and target cluster state cache should be invalidated before refresh. |
| argocd.argoproj.io/skip-reconcile | Application | `"true"` | Indicates to the Argo CD application controller that the Application should not be reconciled. See the [skip reconcile documentation](skip_reconcile.md) for use cases. |
| argocd.argoproj.io/skip-reconcile | Application, Cluster Secret | `"true"` | On an Application, skips reconciliation for that app. On a cluster secret, skips reconciliation for all apps targeting that cluster. See [skip reconcile docs](skip_reconcile.md). |
| argocd.argoproj.io/sync-options | any | [see sync options docs](sync-options.md) | Provides a variety of settings to determine how an Application's resources are synced. |
| argocd.argoproj.io/sync-wave | any | [see sync waves docs](sync-waves.md) | |
| argocd.argoproj.io/tracking-id | any | any | Used by Argo CD to track resources it manages. See [resource tracking docs](resource_tracking.md) for details. |

@@ -330,9 +330,10 @@ This is useful when you have other operators managing resources that are no long
When client-side apply migration is enabled:

1. Argo CD will use the specified field manager (or default if not specified) to perform migration
2. During a server-side apply sync operation, it will:
    - Perform a client-side-apply with the specified field manager
    - Move the 'last-applied-configuration' annotation to be managed by the specified manager
    - Perform the server-side apply, which will auto migrate all the fields under the manager that owns the 'last-applied-configuration' annotation.
    - Check if the specified field manager exists in the resource's `managedFields` with `operation: Update` (indicating client-side apply)
    - Patch the `managedFields`, transferring field ownership from the client-side apply manager to Argo CD's server-side apply manager (`argocd-controller`)
    - Remove the client-side apply manager entry from `managedFields`
    - Perform the server-side apply with the migrated field ownership

This feature is based on Kubernetes' [client-side to server-side apply migration](https://kubernetes.io/docs/reference/using-api/server-side-apply/#migration-between-client-side-and-server-side-apply).
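
For orientation, the migration described above (the last four bullets in the updated flow) only runs during server-side apply syncs. Below is a minimal sketch of an Application that opts into server-side apply via `syncPolicy.syncOptions`; the metadata, repo URL, path, and destination values are placeholders, and whether fields previously owned by `kubectl-client-side-apply` are actually migrated still depends on the client-side apply migration settings discussed in this section:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app        # placeholder
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example/example-repo.git   # placeholder
    path: manifests
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: example
  syncPolicy:
    syncOptions:
      # Sync with server-side apply; during such syncs Argo CD can migrate
      # field ownership from client-side apply managers (operation: Update)
      # to its server-side apply manager (argocd-controller).
      - ServerSideApply=true
```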

@@ -17,10 +17,13 @@
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/util/csaupgrade"
    "k8s.io/client-go/util/retry"
    "k8s.io/klog/v2/textlogger"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"

@@ -1227,7 +1230,9 @@ func (sc *syncContext) shouldUseServerSideApply(targetObj *unstructured.Unstruct
}

// needsClientSideApplyMigration checks if a resource has fields managed by the specified manager
// that need to be migrated to the server-side apply manager
// with operation "Update" (client-side apply) that need to be migrated to server-side apply.
// Client-side apply uses operation "Update", while server-side apply uses operation "Apply".
// We only migrate managers with "Update" operation to avoid re-migrating already-migrated managers.
func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstructured, fieldManager string) bool {
    if liveObj == nil || fieldManager == "" {
        return false

@@ -1239,7 +1244,9 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
    }

    for _, field := range managedFields {
        if field.Manager == fieldManager {
        // Only consider managers with operation "Update" (client-side apply).
        // Managers with operation "Apply" are already using server-side apply.
        if field.Manager == fieldManager && field.Operation == metav1.ManagedFieldsOperationUpdate {
            return true
        }
    }

@@ -1247,29 +1254,70 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
    return false
}

// performClientSideApplyMigration performs a client-side-apply using the specified field manager.
// This moves the 'last-applied-configuration' field to be managed by the specified manager.
// The next time server-side apply is performed, kubernetes automatically migrates all fields from the manager
// that owns 'last-applied-configuration' to the manager that uses server-side apply. This will remove the
// specified manager from the resources managed fields. 'kubectl-client-side-apply' is used as the default manager.
func (sc *syncContext) performClientSideApplyMigration(targetObj *unstructured.Unstructured, fieldManager string) error {
    sc.log.WithValues("resource", kubeutil.GetResourceKey(targetObj)).V(1).Info("Performing client-side apply migration step")
// performCSAUpgradeMigration uses the csaupgrade package to migrate managed fields
// from a client-side apply manager (operation: Update) to the server-side apply manager.
// This directly patches the managedFields to transfer field ownership, avoiding the need
// to write the last-applied-configuration annotation (which has a 262KB size limit).
// This is the primary method for CSA to SSA migration in ArgoCD.
func (sc *syncContext) performCSAUpgradeMigration(liveObj *unstructured.Unstructured, csaFieldManager string) error {
    sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
        "Performing csaupgrade-based migration")

    // Apply with the specified manager to set up the migration
    _, err := sc.resourceOps.ApplyResource(
        context.TODO(),
        targetObj,
        cmdutil.DryRunNone,
        false,
        false,
        false,
        fieldManager,
    )
    // Get the dynamic resource interface for the live object
    gvk := liveObj.GroupVersionKind()
    apiResource, err := kubeutil.ServerResourceForGroupVersionKind(sc.disco, gvk, "patch")
    if err != nil {
        return fmt.Errorf("failed to perform client-side apply migration on manager %s: %w", fieldManager, err)
        return fmt.Errorf("failed to get api resource for %s: %w", gvk, err)
    }
    res := kubeutil.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
    resIf := kubeutil.ToResourceInterface(sc.dynamicIf, apiResource, res, liveObj.GetNamespace())

    // Use retry to handle conflicts if managed fields changed between reconciliation and now
    //nolint:wrapcheck // error is wrapped inside the retry function
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        // Fetch fresh object to get current managed fields state
        freshObj, getErr := resIf.Get(context.TODO(), liveObj.GetName(), metav1.GetOptions{})
        if getErr != nil {
            return fmt.Errorf("failed to get fresh object for CSA migration: %w", getErr)
        }

        // Check if migration is still needed with fresh state
        if !sc.needsClientSideApplyMigration(freshObj, csaFieldManager) {
            sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
                "CSA migration no longer needed")
            return nil
        }

        // Generate the migration patch using the csaupgrade package
        // This unions the CSA manager's fields into the SSA manager and removes the CSA manager entry
        patchData, patchErr := csaupgrade.UpgradeManagedFieldsPatch(
            freshObj,
            sets.New(csaFieldManager),
            sc.serverSideApplyManager,
        )
        if patchErr != nil {
            return fmt.Errorf("failed to generate csaupgrade migration patch: %w", patchErr)
        }
        if patchData == nil {
            // No migration needed
            return nil
        }

        // Apply the migration patch to transfer field ownership.
        _, patchErr = resIf.Patch(context.TODO(), liveObj.GetName(), types.JSONPatchType, patchData, metav1.PatchOptions{})
        if patchErr != nil {
            if apierrors.IsConflict(patchErr) {
                sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
                    "Retrying CSA migration due to conflict")
            }
            // Return the error unmodified so RetryOnConflict can identify conflicts correctly.
            return patchErr
        }

        sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
            "Successfully migrated managed fields using csaupgrade")
        return nil
    })
}

func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.ResultCode, string) {

@@ -1290,11 +1338,14 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.R
    serverSideApply := sc.shouldUseServerSideApply(t.targetObj, dryRun)

    // Check if we need to perform client-side apply migration for server-side apply
    // Perform client-side apply migration for server-side apply
    // This uses csaupgrade to directly patch managedFields, transferring ownership
    // from CSA managers (operation: Update) to the SSA manager (argocd-controller)
    if serverSideApply && !dryRun && sc.enableClientSideApplyMigration {
        if sc.needsClientSideApplyMigration(t.liveObj, sc.clientSideApplyMigrationManager) {
            err = sc.performClientSideApplyMigration(t.targetObj, sc.clientSideApplyMigrationManager)
            err = sc.performCSAUpgradeMigration(t.liveObj, sc.clientSideApplyMigrationManager)
            if err != nil {
                return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration: %v", err)
                return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration for %s: %v", kubeutil.GetResourceKey(t.liveObj), err)
            }
        }
    }

@@ -2601,6 +2601,21 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
            }(),
            expected: true,
        },
        {
            name: "CSA manager with Apply operation should not need migration",
            liveObj: func() *unstructured.Unstructured {
                obj := testingutils.NewPod()
                obj.SetManagedFields([]metav1.ManagedFieldsEntry{
                    {
                        Manager:   "kubectl-client-side-apply",
                        Operation: metav1.ManagedFieldsOperationApply,
                        FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{}}}`)},
                    },
                })
                return obj
            }(),
            expected: false,
        },
    }

    for _, tt := range tests {

@@ -2611,6 +2626,129 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
    }
}

func TestPerformCSAUpgradeMigration_NoMigrationNeeded(t *testing.T) {
    // Create a fake dynamic client with a Pod scheme
    scheme := runtime.NewScheme()
    _ = corev1.AddToScheme(scheme)

    // Object with only SSA manager (operation: Apply), no CSA manager (operation: Update)
    obj := testingutils.NewPod()
    obj.SetNamespace(testingutils.FakeArgoCDNamespace)
    obj.SetManagedFields([]metav1.ManagedFieldsEntry{
        {
            Manager:   "argocd-controller",
            Operation: metav1.ManagedFieldsOperationApply,
            FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:containers":{}}}`)},
        },
    })

    // Create fake dynamic client with the object
    dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

    syncCtx := newTestSyncCtx(nil)
    syncCtx.serverSideApplyManager = "argocd-controller"
    syncCtx.dynamicIf = dynamicClient
    syncCtx.disco = &fakedisco.FakeDiscovery{
        Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
    }

    // Should return nil (no error) because there's no CSA manager to migrate
    err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
    assert.NoError(t, err)
}

func TestPerformCSAUpgradeMigration_WithCSAManager(t *testing.T) {
    // Create a fake dynamic client with a Pod scheme
    scheme := runtime.NewScheme()
    _ = corev1.AddToScheme(scheme)

    // Create the live object with a CSA manager (operation: Update)
    obj := testingutils.NewPod()
    obj.SetNamespace(testingutils.FakeArgoCDNamespace)
    obj.SetManagedFields([]metav1.ManagedFieldsEntry{
        {
            Manager:   "kubectl-client-side-apply",
            Operation: metav1.ManagedFieldsOperationUpdate,
            FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
        },
    })

    // Create fake dynamic client with the object
    dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

    syncCtx := newTestSyncCtx(nil)
    syncCtx.serverSideApplyManager = "argocd-controller"
    syncCtx.dynamicIf = dynamicClient
    syncCtx.disco = &fakedisco.FakeDiscovery{
        Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
    }

    // Perform the migration
    err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
    assert.NoError(t, err)

    // Get the updated object from the fake client
    gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
    updatedObj, err := dynamicClient.Resource(gvr).Namespace(obj.GetNamespace()).Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
    require.NoError(t, err)

    // Verify the CSA manager (operation: Update) no longer exists
    managedFields := updatedObj.GetManagedFields()
    for _, mf := range managedFields {
        if mf.Manager == "kubectl-client-side-apply" && mf.Operation == metav1.ManagedFieldsOperationUpdate {
            t.Errorf("CSA manager 'kubectl-client-side-apply' with operation Update should have been removed, but still exists")
        }
    }
}

func TestPerformCSAUpgradeMigration_ConflictRetry(t *testing.T) {
    // This test verifies that when a 409 Conflict occurs on the patch because
    // another actor modified the object between Get and Patch, changing the resourceVersion,
    // the retry.RetryOnConflict loop retries and eventually succeeds.
    scheme := runtime.NewScheme()
    _ = corev1.AddToScheme(scheme)

    obj := testingutils.NewPod()
    obj.SetNamespace(testingutils.FakeArgoCDNamespace)
    obj.SetManagedFields([]metav1.ManagedFieldsEntry{
        {
            Manager:   "kubectl-client-side-apply",
            Operation: metav1.ManagedFieldsOperationUpdate,
            FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
        },
    })

    dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

    // Simulate a conflict on the first patch attempt, where another
    // controller modified the object between our Get and Patch, bumping the resourceVersion.
    // The second attempt should succeed.
    patchAttempt := 0
    dynamicClient.PrependReactor("patch", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
        patchAttempt++
        if patchAttempt == 1 {
            // First attempt: simulate 409 Conflict (resourceVersion mismatch)
            return true, nil, apierrors.NewConflict(
                schema.GroupResource{Group: "", Resource: "pods"},
                obj.GetName(),
                errors.New("the object has been modified; please apply your changes to the latest version"),
            )
        }
        return false, nil, nil
    })

    syncCtx := newTestSyncCtx(nil)
    syncCtx.serverSideApplyManager = "argocd-controller"
    syncCtx.dynamicIf = dynamicClient
    syncCtx.disco = &fakedisco.FakeDiscovery{
        Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
    }

    err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
    assert.NoError(t, err, "Migration should succeed after retrying on conflict")
    assert.Equal(t, 2, patchAttempt, "Expected exactly 2 patch attempts (1 conflict + 1 success)")
}

func diffResultListClusterResource() *diff.DiffResultList {
    ns1 := testingutils.NewNamespace()
    ns1.SetName("ns-1")

go.mod

@@ -147,8 +147,8 @@ require (
    github.com/ProtonMail/go-crypto v1.1.6 // indirect
    github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20240116134246-a8cbe886bab0 // indirect
    github.com/aws/aws-sdk-go-v2 v1.41.1
    github.com/aws/aws-sdk-go-v2/config v1.32.8
    github.com/aws/aws-sdk-go-v2/credentials v1.19.8
    github.com/aws/aws-sdk-go-v2/config v1.32.9
    github.com/aws/aws-sdk-go-v2/credentials v1.19.9
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect

@@ -157,7 +157,7 @@ require (
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
    github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
    github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.41.6
    github.com/aws/smithy-go v1.24.0

go.sum

@@ -128,10 +128,10 @@ github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/config v1.32.8 h1:iu+64gwDKEoKnyTQskSku72dAwggKI5sV6rNvgSMpMs=
github.com/aws/aws-sdk-go-v2/config v1.32.8/go.mod h1:MI2XvA+qDi3i9AJxX1E2fu730syEBzp/jnXrjxuHwgI=
github.com/aws/aws-sdk-go-v2/credentials v1.19.8 h1:Jp2JYH1lRT3KhX4mshHPvVYsR5qqRec3hGvEarNYoR0=
github.com/aws/aws-sdk-go-v2/credentials v1.19.8/go.mod h1:fZG9tuvyVfxknv1rKibIz3DobRaFw1Poe8IKtXB3XYY=
github.com/aws/aws-sdk-go-v2/config v1.32.9 h1:ktda/mtAydeObvJXlHzyGpK1xcsLaP16zfUPDGoW90A=
github.com/aws/aws-sdk-go-v2/config v1.32.9/go.mod h1:U+fCQ+9QKsLW786BCfEjYRj34VVTbPdsLP3CHSYXMOI=
github.com/aws/aws-sdk-go-v2/credentials v1.19.9 h1:sWvTKsyrMlJGEuj/WgrwilpoJ6Xa1+KhIpGdzw7mMU8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.9/go.mod h1:+J44MBhmfVY/lETFiKI+klz0Vym2aCmIjqgClMmW82w=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=

@@ -148,8 +148,8 @@ github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1 h1:ZtgZeMPJH8+/vNs9vJFFLI0QEzYbcN0p7x1/FFwyROc=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1/go.mod h1:Bar4MrRxeqdn6XIh8JGfiXuFRmyrrsZNTJotxEJmWW0=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 h1:+VTRawC4iVY58pS/lzpo0lnoa/SYNGF4/B/3/U5ro8Y=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.10/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 h1:0jbJeuEHlwKJ9PfXtpSFc4MF+WIWORdhN1n30ITZGFM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=

@@ -217,6 +217,32 @@ func TestClusterURLInRestAPI(t *testing.T) {
    assert.Equal(t, map[string]string{"test": "val"}, cluster.Labels)
}

func TestClusterSkipReconcileAnnotation(t *testing.T) {
    fixture.EnsureCleanState(t)

    clusterURL := url.QueryEscape(KubernetesInternalAPIServerAddr)

    var cluster Cluster
    err := fixture.DoHttpJsonRequest("PUT",
        fmt.Sprintf("/api/v1/clusters/%s?updatedFields=annotations", clusterURL),
        &cluster,
        fmt.Appendf(nil, `{"annotations":{%q:"true"}}`, "argocd.argoproj.io/skip-reconcile")...)
    require.NoError(t, err)
    assert.Equal(t, "true", cluster.Annotations["argocd.argoproj.io/skip-reconcile"])

    var cluster2 Cluster
    err = fixture.DoHttpJsonRequest("GET", "/api/v1/clusters/"+clusterURL, &cluster2)
    require.NoError(t, err)
    assert.Equal(t, "in-cluster", cluster2.Name)
    assert.Equal(t, "true", cluster2.Annotations["argocd.argoproj.io/skip-reconcile"])

    err = fixture.DoHttpJsonRequest("PUT",
        fmt.Sprintf("/api/v1/clusters/%s?updatedFields=annotations", clusterURL),
        &cluster,
        []byte(`{"annotations":{}}`)...)
    require.NoError(t, err)
}

func TestClusterDeleteDenied(t *testing.T) {
    ctx := accountFixture.Given(t)
    ctx.Name("test").