fix: use csaupgrade to patch managedFields for client-side apply migration (#26289)
Signed-off-by: Peter Jiang <peterjiang823@gmail.com>
@@ -330,9 +330,10 @@ This is useful when you have other operators managing resources that are no long
When client-side apply migration is enabled:

1. Argo CD will use the specified field manager (or default if not specified) to perform migration
2. During a server-side apply sync operation, it will:
    - Perform a client-side-apply with the specified field manager
    - Move the 'last-applied-configuration' annotation to be managed by the specified manager
    - Perform the server-side apply, which will auto-migrate all the fields under the manager that owns the 'last-applied-configuration' annotation.
    - Check if the specified field manager exists in the resource's `managedFields` with `operation: Update` (indicating client-side apply)
    - Patch the `managedFields`, transferring field ownership from the client-side apply manager to Argo CD's server-side apply manager (`argocd-controller`)
    - Remove the client-side apply manager entry from `managedFields`
    - Perform the server-side apply with the migrated field ownership

This feature is based on Kubernetes' [client-side to server-side apply migration](https://kubernetes.io/docs/reference/using-api/server-side-apply/#migration-between-client-side-and-server-side-apply).
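Under the hood this relies on client-go's `csaupgrade` helper. The snippet below is only an illustrative sketch, not Argo CD code (the standalone `main`, the demo Pod, and the printed output are assumptions for the example); it shows how ownership of client-side-applied fields is handed to the `argocd-controller` manager:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/csaupgrade"
)

func main() {
	// A live object whose labels are still owned by kubectl's
	// client-side apply manager (operation: Update).
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "demo",
			ManagedFields: []metav1.ManagedFieldsEntry{{
				Manager:    "kubectl-client-side-apply",
				Operation:  metav1.ManagedFieldsOperationUpdate,
				APIVersion: "v1",
				FieldsType: "FieldsV1",
				FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
			}},
		},
	}

	// Build a JSON patch that transfers ownership of those fields to the
	// server-side apply manager and drops the client-side apply entry.
	patch, err := csaupgrade.UpgradeManagedFieldsPatch(
		pod,
		sets.New("kubectl-client-side-apply"),
		"argocd-controller",
	)
	if err != nil {
		panic(err)
	}
	// A nil patch means there is nothing left to migrate; otherwise the
	// patch is applied against the live resource as a JSON patch.
	fmt.Println(string(patch))
}
```

The resulting JSON patch is what gets applied to the live resource during the sync, so no `last-applied-configuration` annotation has to be written.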
@@ -17,10 +17,13 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/csaupgrade"
	"k8s.io/client-go/util/retry"
	"k8s.io/klog/v2/textlogger"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -1227,7 +1230,9 @@ func (sc *syncContext) shouldUseServerSideApply(targetObj *unstructured.Unstruct
}

// needsClientSideApplyMigration checks if a resource has fields managed by the specified manager
// that need to be migrated to the server-side apply manager
// with operation "Update" (client-side apply) that need to be migrated to server-side apply.
// Client-side apply uses operation "Update", while server-side apply uses operation "Apply".
// We only migrate managers with "Update" operation to avoid re-migrating already-migrated managers.
func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstructured, fieldManager string) bool {
	if liveObj == nil || fieldManager == "" {
		return false
@@ -1239,7 +1244,9 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
	}

	for _, field := range managedFields {
		if field.Manager == fieldManager {
		// Only consider managers with operation "Update" (client-side apply).
		// Managers with operation "Apply" are already using server-side apply.
		if field.Manager == fieldManager && field.Operation == metav1.ManagedFieldsOperationUpdate {
			return true
		}
	}
@@ -1247,31 +1254,72 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
	return false
}

// performClientSideApplyMigration performs a client-side-apply using the specified field manager.
// This moves the 'last-applied-configuration' field to be managed by the specified manager.
// The next time server-side apply is performed, kubernetes automatically migrates all fields from the manager
// that owns 'last-applied-configuration' to the manager that uses server-side apply. This will remove the
// specified manager from the resource's managed fields. 'kubectl-client-side-apply' is used as the default manager.
func (sc *syncContext) performClientSideApplyMigration(targetObj *unstructured.Unstructured, fieldManager string) error {
	sc.log.WithValues("resource", kubeutil.GetResourceKey(targetObj)).V(1).Info("Performing client-side apply migration step")
// performCSAUpgradeMigration uses the csaupgrade package to migrate managed fields
// from a client-side apply manager (operation: Update) to the server-side apply manager.
// This directly patches the managedFields to transfer field ownership, avoiding the need
// to write the last-applied-configuration annotation (which has a 262KB size limit).
// This is the primary method for CSA to SSA migration in ArgoCD.
func (sc *syncContext) performCSAUpgradeMigration(liveObj *unstructured.Unstructured, csaFieldManager string) error {
	sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
		"Performing csaupgrade-based migration")

	// Apply with the specified manager to set up the migration
	_, err := sc.resourceOps.ApplyResource(
		context.TODO(),
		targetObj,
		cmdutil.DryRunNone,
		false,
		false,
		false,
		fieldManager,
	)
	// Get the dynamic resource interface for the live object
	gvk := liveObj.GroupVersionKind()
	apiResource, err := kubeutil.ServerResourceForGroupVersionKind(sc.disco, gvk, "patch")
	if err != nil {
		return fmt.Errorf("failed to perform client-side apply migration on manager %s: %w", fieldManager, err)
		return fmt.Errorf("failed to get api resource for %s: %w", gvk, err)
	}
	res := kubeutil.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
	resIf := kubeutil.ToResourceInterface(sc.dynamicIf, apiResource, res, liveObj.GetNamespace())

	// Use retry to handle conflicts if managed fields changed between reconciliation and now
	//nolint:wrapcheck // error is wrapped inside the retry function
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Fetch fresh object to get current managed fields state
		freshObj, getErr := resIf.Get(context.TODO(), liveObj.GetName(), metav1.GetOptions{})
		if getErr != nil {
			return fmt.Errorf("failed to get fresh object for CSA migration: %w", getErr)
		}

		// Check if migration is still needed with fresh state
		if !sc.needsClientSideApplyMigration(freshObj, csaFieldManager) {
			sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
				"CSA migration no longer needed")
			return nil
		}

		// Generate the migration patch using the csaupgrade package
		// This unions the CSA manager's fields into the SSA manager and removes the CSA manager entry
		patchData, patchErr := csaupgrade.UpgradeManagedFieldsPatch(
			freshObj,
			sets.New(csaFieldManager),
			sc.serverSideApplyManager,
		)
		if patchErr != nil {
			return fmt.Errorf("failed to generate csaupgrade migration patch: %w", patchErr)
		}
		if patchData == nil {
			// No migration needed
			return nil
		}

		// Apply the migration patch to transfer field ownership.
		_, patchErr = resIf.Patch(context.TODO(), liveObj.GetName(), types.JSONPatchType, patchData, metav1.PatchOptions{})
		if patchErr != nil {
			if apierrors.IsConflict(patchErr) {
				sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
					"Retrying CSA migration due to conflict")
			}
			// Return the error unmodified so RetryOnConflict can identify conflicts correctly.
			return patchErr
		}

		sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
			"Successfully migrated managed fields using csaupgrade")
		return nil
	})
}

func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.ResultCode, string) {
	dryRunStrategy := cmdutil.DryRunNone
	if dryRun {
@@ -1290,11 +1338,14 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.R
	serverSideApply := sc.shouldUseServerSideApply(t.targetObj, dryRun)

	// Check if we need to perform client-side apply migration for server-side apply
	// Perform client-side apply migration for server-side apply
	// This uses csaupgrade to directly patch managedFields, transferring ownership
	// from CSA managers (operation: Update) to the SSA manager (argocd-controller)
	if serverSideApply && !dryRun && sc.enableClientSideApplyMigration {
		if sc.needsClientSideApplyMigration(t.liveObj, sc.clientSideApplyMigrationManager) {
			err = sc.performClientSideApplyMigration(t.targetObj, sc.clientSideApplyMigrationManager)
			err = sc.performCSAUpgradeMigration(t.liveObj, sc.clientSideApplyMigrationManager)
			if err != nil {
				return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration: %v", err)
				return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration for %s: %v", kubeutil.GetResourceKey(t.liveObj), err)
			}
		}
	}
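To make the ownership hand-off described in the comments above concrete, here is an illustrative before/after sketch of a resource's `managedFields`. It is a minimal standalone program, not part of this change; the label field and entry values are assumptions for the example:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The set of fields being tracked; here just one label, as in the tests below.
	owned := &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)}

	// Before migration: the fields are recorded under kubectl's client-side
	// apply manager with operation "Update".
	before := metav1.ManagedFieldsEntry{
		Manager:   "kubectl-client-side-apply",
		Operation: metav1.ManagedFieldsOperationUpdate,
		FieldsV1:  owned,
	}

	// After the csaupgrade patch: the same fields are owned by Argo CD's
	// server-side apply manager with operation "Apply", and the client-side
	// apply entry has been removed from managedFields.
	after := metav1.ManagedFieldsEntry{
		Manager:   "argocd-controller",
		Operation: metav1.ManagedFieldsOperationApply,
		FieldsV1:  owned,
	}

	fmt.Printf("before: %s (%s)\nafter:  %s (%s)\n",
		before.Manager, before.Operation, after.Manager, after.Operation)
}
```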
@@ -2601,6 +2601,21 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
			}(),
			expected: true,
		},
		{
			name: "CSA manager with Apply operation should not need migration",
			liveObj: func() *unstructured.Unstructured {
				obj := testingutils.NewPod()
				obj.SetManagedFields([]metav1.ManagedFieldsEntry{
					{
						Manager:   "kubectl-client-side-apply",
						Operation: metav1.ManagedFieldsOperationApply,
						FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{}}}`)},
					},
				})
				return obj
			}(),
			expected: false,
		},
	}

	for _, tt := range tests {
@@ -2611,6 +2626,129 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
	}
}

func TestPerformCSAUpgradeMigration_NoMigrationNeeded(t *testing.T) {
	// Create a fake dynamic client with a Pod scheme
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	// Object with only SSA manager (operation: Apply), no CSA manager (operation: Update)
	obj := testingutils.NewPod()
	obj.SetNamespace(testingutils.FakeArgoCDNamespace)
	obj.SetManagedFields([]metav1.ManagedFieldsEntry{
		{
			Manager:   "argocd-controller",
			Operation: metav1.ManagedFieldsOperationApply,
			FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:containers":{}}}`)},
		},
	})

	// Create fake dynamic client with the object
	dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

	syncCtx := newTestSyncCtx(nil)
	syncCtx.serverSideApplyManager = "argocd-controller"
	syncCtx.dynamicIf = dynamicClient
	syncCtx.disco = &fakedisco.FakeDiscovery{
		Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
	}

	// Should return nil (no error) because there's no CSA manager to migrate
	err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
	assert.NoError(t, err)
}
func TestPerformCSAUpgradeMigration_WithCSAManager(t *testing.T) {
	// Create a fake dynamic client with a Pod scheme
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	// Create the live object with a CSA manager (operation: Update)
	obj := testingutils.NewPod()
	obj.SetNamespace(testingutils.FakeArgoCDNamespace)
	obj.SetManagedFields([]metav1.ManagedFieldsEntry{
		{
			Manager:   "kubectl-client-side-apply",
			Operation: metav1.ManagedFieldsOperationUpdate,
			FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
		},
	})

	// Create fake dynamic client with the object
	dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

	syncCtx := newTestSyncCtx(nil)
	syncCtx.serverSideApplyManager = "argocd-controller"
	syncCtx.dynamicIf = dynamicClient
	syncCtx.disco = &fakedisco.FakeDiscovery{
		Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
	}

	// Perform the migration
	err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
	assert.NoError(t, err)

	// Get the updated object from the fake client
	gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
	updatedObj, err := dynamicClient.Resource(gvr).Namespace(obj.GetNamespace()).Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
	require.NoError(t, err)

	// Verify the CSA manager (operation: Update) no longer exists
	managedFields := updatedObj.GetManagedFields()
	for _, mf := range managedFields {
		if mf.Manager == "kubectl-client-side-apply" && mf.Operation == metav1.ManagedFieldsOperationUpdate {
			t.Errorf("CSA manager 'kubectl-client-side-apply' with operation Update should have been removed, but still exists")
		}
	}
}
func TestPerformCSAUpgradeMigration_ConflictRetry(t *testing.T) {
	// This test verifies that when the patch fails with a 409 Conflict because
	// another actor modified the object between Get and Patch (changing the resourceVersion),
	// the retry.RetryOnConflict loop retries and eventually succeeds.
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	obj := testingutils.NewPod()
	obj.SetNamespace(testingutils.FakeArgoCDNamespace)
	obj.SetManagedFields([]metav1.ManagedFieldsEntry{
		{
			Manager:   "kubectl-client-side-apply",
			Operation: metav1.ManagedFieldsOperationUpdate,
			FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
		},
	})

	dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

	// Simulate a conflict on the first patch attempt, where another
	// controller modified the object between our Get and Patch (bumping the resourceVersion).
	// The second attempt should succeed.
	patchAttempt := 0
	dynamicClient.PrependReactor("patch", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
		patchAttempt++
		if patchAttempt == 1 {
			// First attempt: simulate 409 Conflict (resourceVersion mismatch)
			return true, nil, apierrors.NewConflict(
				schema.GroupResource{Group: "", Resource: "pods"},
				obj.GetName(),
				errors.New("the object has been modified; please apply your changes to the latest version"),
			)
		}
		return false, nil, nil
	})

	syncCtx := newTestSyncCtx(nil)
	syncCtx.serverSideApplyManager = "argocd-controller"
	syncCtx.dynamicIf = dynamicClient
	syncCtx.disco = &fakedisco.FakeDiscovery{
		Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
	}

	err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
	assert.NoError(t, err, "Migration should succeed after retrying on conflict")
	assert.Equal(t, 2, patchAttempt, "Expected exactly 2 patch attempts (1 conflict + 1 success)")
}
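The assertion on `patchAttempt` depends on `retry.RetryOnConflict` treating only 409 Conflict errors as retryable, which is also why `performCSAUpgradeMigration` returns the patch error unmodified. A minimal standalone sketch of that behaviour follows; the resource name and error message are illustrative, not taken from Argo CD:

```go
package main

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	// RetryOnConflict re-runs the function only while it returns a 409
	// Conflict; any other error (or nil) ends the loop immediately.
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		attempts++
		if attempts == 1 {
			// Simulated conflict on the first attempt, mirroring the reactor above.
			return apierrors.NewConflict(
				schema.GroupResource{Resource: "pods"},
				"demo",
				errors.New("the object has been modified"),
			)
		}
		return nil
	})
	fmt.Println(err, attempts) // <nil> 2
}
```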
func diffResultListClusterResource() *diff.DiffResultList {
	ns1 := testingutils.NewNamespace()
	ns1.SetName("ns-1")