Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-03-09 18:08:48 +01:00)
Compare commits
11 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | ff239dcd20 |  |
|  | 4411801980 |  |
|  | c6df35db8e |  |
|  | 6224d6787e |  |
|  | 5e190219c9 |  |
|  | 968c6338a7 |  |
|  | 3d3760f4b4 |  |
|  | c61c5931ce |  |
|  | 8a3940d8db |  |
|  | 1bf62aea19 |  |
|  | 67c23193c4 |  |
```diff
@@ -72,7 +72,7 @@ func Test_loadClusters(t *testing.T) {
 				ConnectionState: v1alpha1.ConnectionState{
 					Status: "Successful",
 				},
-				ServerVersion: ".",
+				ServerVersion: "0.0.0",
 				Shard:         ptr.To(int64(0)),
 			},
 			Namespaces: []string{"test"},
```
```diff
@@ -3,6 +3,7 @@ package controller
 import (
 	"context"
 	"fmt"
+	"slices"
 	"strings"
 
 	"github.com/argoproj/gitops-engine/pkg/health"
@@ -43,8 +44,12 @@ func isHookOfType(obj *unstructured.Unstructured, hookType HookType) bool {
 	}
 
 	for k, v := range hookTypeAnnotations[hookType] {
-		if val, ok := obj.GetAnnotations()[k]; ok && val == v {
-			return true
+		if val, ok := obj.GetAnnotations()[k]; ok {
+			if slices.ContainsFunc(strings.Split(val, ","), func(item string) bool {
+				return strings.TrimSpace(item) == v
+			}) {
+				return true
+			}
 		}
 	}
 	return false
```
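The change above makes `isHookOfType` treat the hook annotation value as a comma-separated list, so a resource annotated with several hook types now matches each of them individually. A minimal standalone sketch of the new matching behavior (the `matchesHook` helper is illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// matchesHook reports whether a comma-separated annotation value
// contains the wanted hook type, ignoring surrounding whitespace.
func matchesHook(annotationValue, wanted string) bool {
	return slices.ContainsFunc(strings.Split(annotationValue, ","), func(item string) bool {
		return strings.TrimSpace(item) == wanted
	})
}

func main() {
	fmt.Println(matchesHook("pre-delete,post-delete", "post-delete"))  // true
	fmt.Println(matchesHook("pre-delete, post-delete", "post-delete")) // true: whitespace is trimmed
	fmt.Println(matchesHook("pre-delete", "post-delete"))              // false
}
```

Previously the whole annotation value had to equal the hook type exactly, so `"pre-delete,post-delete"` matched neither type. The tests added below exercise exactly this case.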
```diff
@@ -127,6 +127,16 @@ func TestIsPreDeleteHook(t *testing.T) {
 			annot:    map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
 			expected: false,
 		},
+		{
+			name:     "Helm PreDelete & PreDelete hook",
+			annot:    map[string]string{"helm.sh/hook": "pre-delete,post-delete"},
+			expected: true,
+		},
+		{
+			name:     "ArgoCD PostDelete & PreDelete hook",
+			annot:    map[string]string{"argocd.argoproj.io/hook": "PostDelete,PreDelete"},
+			expected: true,
+		},
 	}
 
 	for _, tt := range tests {
@@ -160,6 +170,16 @@ func TestIsPostDeleteHook(t *testing.T) {
 			annot:    map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
 			expected: false,
 		},
+		{
+			name:     "ArgoCD PostDelete & PreDelete hook",
+			annot:    map[string]string{"argocd.argoproj.io/hook": "PostDelete,PreDelete"},
+			expected: true,
+		},
+		{
+			name:     "Helm PostDelete & PreDelete hook",
+			annot:    map[string]string{"helm.sh/hook": "post-delete,pre-delete"},
+			expected: true,
+		},
 	}
 
 	for _, tt := range tests {
@@ -171,3 +191,38 @@ func TestIsPostDeleteHook(t *testing.T) {
 		})
 	}
 }
+
+func TestMultiHookOfType(t *testing.T) {
+	tests := []struct {
+		name     string
+		hookType []HookType
+		annot    map[string]string
+		expected bool
+	}{
+		{
+			name:     "helm PreDelete & PostDelete hook",
+			hookType: []HookType{PreDeleteHookType, PostDeleteHookType},
+			annot:    map[string]string{"helm.sh/hook": "pre-delete,post-delete"},
+			expected: true,
+		},
+
+		{
+			name:     "ArgoCD PreDelete & PostDelete hook",
+			hookType: []HookType{PreDeleteHookType, PostDeleteHookType},
+			annot:    map[string]string{"argocd.argoproj.io/hook": "PreDelete,PostDelete"},
+			expected: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			obj := &unstructured.Unstructured{}
+			obj.SetAnnotations(tt.annot)
+
+			for _, hookType := range tt.hookType {
+				result := isHookOfType(obj, hookType)
+				assert.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
```
```diff
@@ -27,6 +27,12 @@ When Argo CD is upgraded manually using plain manifests or Kustomize overlays, i
 
 Users upgrading Argo CD manually using `helm upgrade` are not impacted by this change, since Helm does not use client-side apply and does not result in creation of the `last-applied` annotation.
 
+#### Users who previously upgraded to 3.3.0 or 3.3.1
+In some cases, after upgrading to one of those versions and applying Server-Side Apply, the following error occurred:
+`one or more synchronization tasks completed unsuccessfully, reason: Failed to perform client-side apply migration: failed to perform client-side apply migration on manager kubectl-client-side-apply: error when patching "/dev/shm/2047509016": CustomResourceDefinition.apiextensions.k8s.io "applicationsets.argoproj.io" is invalid: metadata.annotations: Too long: may not be more than 262144 bytes`.
+
+Users who configured the sync option `ClientSideApplyMigration=false` as a temporary remediation for the above error should remove it after upgrading to `3.3.2`. Leaving `ClientSideApplyMigration` disabled risks future conflicts between Kubernetes field managers.
+
 ### Source Hydrator Now Tracks Hydration State Using Git Notes
 
 Previously, Argo CD's Source Hydrator pushed a new hydrated commit for every DRY (source) commit, regardless of whether any manifest files (`manifest.yaml`) actually changed. This was necessary for the hydrator to track which DRY commit had last been hydrated: it embedded this information in the `hydrator.metadata` file's `drySha` field in each hydrated commit.
```
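The `ClientSideApplyMigration=false` remediation mentioned in the hunk above is a sync option on the Application. A minimal sketch of where the option lives (the application name and the rest of the spec are illustrative); removing the remediation simply means deleting that list entry after upgrading:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: example-app   # illustrative
spec:
  syncPolicy:
    syncOptions:
      - ServerSideApply=true
      # Temporary remediation for the annotation-size error; remove after upgrading to 3.3.2:
      - ClientSideApplyMigration=false
```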
```diff
@@ -112,3 +118,11 @@ If you rely on Helm charts within kustomization files, please review the details
 * [services.cloud.sap.com/ServiceBinding](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
 * [services.cloud.sap.com/ServiceInstance](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
 * [\_.cnrm.cloud.google.com/\_](https://github.com/argoproj/argo-cd/commit/30abebda3d930d93065eec8864aac7e0d56ae119)
+
+## More detailed cluster version
+
+3.3.3 now stores the cluster version in the more detailed `Major.Minor.Patch` format, rather than the previous `Major.Minor`.
+This allows more accurate version comparisons, better compatibility with future Kubernetes releases, and room for future features.
+
+Users will notice the new format in the UI and in the CLI commands that retrieve cluster information.
```
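As a worked example of the new `Major.Minor.Patch` format, the normalization described here (and implemented in the `GetServerVersion` diff later in this compare) can be sketched with the apimachinery version parser:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// ParseGeneric tolerates vendor-specific suffixes such as "+IKS",
	// "-gke.100", or "+k3s1" and keeps only major.minor.patch.
	v, err := version.ParseGeneric("v1.30.11+IKS")
	if err != nil {
		panic(err)
	}
	fmt.Println("v" + v.String()) // v1.30.11 (the old format would have stored "1.30")
}
```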
```diff
@@ -330,9 +330,10 @@ This is useful when you have other operators managing resources that are no long
 When client-side apply migration is enabled:
 1. Argo CD will use the specified field manager (or the default if not specified) to perform the migration
 2. During a server-side apply sync operation, it will:
-   - Perform a client-side apply with the specified field manager
-   - Move the 'last-applied-configuration' annotation to be managed by the specified manager
-   - Perform the server-side apply, which will auto-migrate all the fields under the manager that owns the 'last-applied-configuration' annotation.
+   - Check if the specified field manager exists in the resource's `managedFields` with `operation: Update` (indicating client-side apply)
+   - Patch the `managedFields`, transferring field ownership from the client-side apply manager to Argo CD's server-side apply manager (`argocd-controller`)
+   - Remove the client-side apply manager entry from `managedFields`
+   - Perform the server-side apply with the migrated field ownership
 
 This feature is based on Kubernetes' [client-side apply migration KEP](https://github.com/alexzielenski/enhancements/blob/03df8820b9feca6d2cab78e303c99b2c9c0c4c5c/keps/sig-cli/3517-kubectl-client-side-apply-migration/README.md), which describes the automatic migration from client-side to server-side apply.
```
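To observe the migration's effect on a live resource, you can inspect `managedFields` directly (the deployment name below is illustrative). Before migration, the client-side apply manager appears with `operation: Update`; afterwards its entry is gone and its fields belong to `argocd-controller` with `operation: Apply`:

```yaml
# kubectl get deployment example --show-managed-fields -o yaml
metadata:
  managedFields:
    # Before migration: a client-side apply manager owns fields via "Update"
    - manager: kubectl-client-side-apply
      operation: Update
    - manager: argocd-controller
      operation: Apply
    # After migration: the kubectl-client-side-apply entry is removed and
    # its fields are unioned into the argocd-controller (Apply) entry.
```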
gitops-engine/pkg/cache/cluster.go (vendored, 26 lines changed)
```diff
@@ -1213,7 +1213,9 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
 }
 
 // processCrossNamespaceChildren processes namespaced children of cluster-scoped resources
-// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries
+// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries.
+// It also handles multi-level hierarchies where cluster-scoped resources own other cluster-scoped resources
+// that in turn own namespaced resources (e.g., Provider -> ProviderRevision -> Deployment in Crossplane).
 func (c *clusterCache) processCrossNamespaceChildren(
 	clusterScopedKeys []kube.ResourceKey,
 	visited map[kube.ResourceKey]int,
@@ -1230,7 +1232,21 @@ func (c *clusterCache) processCrossNamespaceChildren(
 		childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
 		for _, childKey := range childKeys {
 			child := c.resources[childKey]
-			if child == nil || visited[childKey] != 0 {
+			if child == nil {
 				continue
 			}
 
+			alreadyVisited := visited[childKey] != 0
+
+			// If child is cluster-scoped and was already visited by processNamespaceHierarchy,
+			// we still need to recursively check for its cross-namespace children.
+			// This handles multi-level hierarchies like: ClusterScoped -> ClusterScoped -> Namespaced
+			// (e.g., Crossplane's Provider -> ProviderRevision -> Deployment)
+			if alreadyVisited {
+				if childKey.Namespace == "" {
+					// Recursively process cross-namespace children of this cluster-scoped child
+					c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
+				}
+				continue
+			}
+
@@ -1245,6 +1261,12 @@ func (c *clusterCache) processCrossNamespaceChildren(
 			visited[childKey] = 1
 			// Recursively process descendants using index-based traversal
 			c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
+
+			// If this child is also cluster-scoped, recursively process its cross-namespace children
+			if childKey.Namespace == "" {
+				c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
+			}
+
 			visited[childKey] = 2
 		}
 	}
```
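The `visited` map in `processCrossNamespaceChildren` is tri-state: 0 means unvisited, 1 means a visit is in progress, and 2 means fully processed. A minimal standalone sketch of that pattern (generic string nodes, not the actual cache types):

```go
package main

import "fmt"

const (
	unvisited  = 0 // node not yet seen
	inProgress = 1 // node currently being expanded; guards against cycles
	done       = 2 // node and all descendants processed
)

// walk performs a depth-first traversal using the tri-state marker,
// mirroring how the cluster cache avoids revisiting resources.
func walk(node string, children map[string][]string, visited map[string]int) {
	if visited[node] != unvisited {
		return // in progress or done: skip to prevent cycles and duplicates
	}
	visited[node] = inProgress
	for _, child := range children[node] {
		walk(child, children, visited)
	}
	visited[node] = done
	fmt.Println("processed", node)
}

func main() {
	// Crossplane-style hierarchy: Provider -> ProviderRevision -> Deployment
	children := map[string][]string{
		"Provider":         {"ProviderRevision"},
		"ProviderRevision": {"Deployment"},
	}
	walk("Provider", children, map[string]int{})
}
```

The subtlety the diff addresses is the already-visited case: a cluster-scoped child marked done by an earlier pass may still need its cross-namespace children expanded, which is why the real code re-enters such nodes rather than skipping them unconditionally.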
gitops-engine/pkg/cache/cluster_test.go (vendored, 204 lines changed)
```diff
@@ -1350,6 +1350,98 @@ func TestIterateHierarchyV2_ClusterScopedParent_FindsAllChildren(t *testing.T) {
 	assert.ElementsMatch(t, expected, keys)
 }
 
+func TestIterateHierarchyV2_MultiLevelClusterScoped_FindsNamespacedGrandchildren(t *testing.T) {
+	// Test 3-level hierarchy: ClusterScoped -> ClusterScoped -> Namespaced
+	// This tests the scenario where:
+	// Provider (managed) -> ProviderRevision (dynamic) -> Deployment (namespaced)
+	// The namespaced grandchildren should be found even when only the root is passed as a key.
+
+	// Level 1: Cluster-scoped parent (like Provider - this is the "managed" resource)
+	clusterParent := &corev1.Namespace{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Namespace",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "root-cluster-parent",
+			UID:             "root-parent-uid",
+			ResourceVersion: "1",
+		},
+	}
+
+	// Level 2: Cluster-scoped intermediate (like ProviderRevision - dynamically created, NOT managed)
+	clusterIntermediate := &rbacv1.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "rbac.authorization.k8s.io/v1",
+			Kind:       "ClusterRole",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "intermediate-cluster-child",
+			UID:             "intermediate-uid",
+			ResourceVersion: "1",
+			OwnerReferences: []metav1.OwnerReference{{
+				APIVersion: "v1",
+				Kind:       "Namespace",
+				Name:       "root-cluster-parent",
+				UID:        "root-parent-uid",
+			}},
+		},
+	}
+
+	// Level 3: Namespaced grandchild (like Deployment owned by ProviderRevision)
+	namespacedGrandchild := &corev1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Pod",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "namespaced-grandchild",
+			Namespace:       "some-namespace",
+			UID:             "grandchild-uid",
+			ResourceVersion: "1",
+			OwnerReferences: []metav1.OwnerReference{{
+				APIVersion: "rbac.authorization.k8s.io/v1",
+				Kind:       "ClusterRole",
+				Name:       "intermediate-cluster-child",
+				UID:        "intermediate-uid",
+			}},
+		},
+	}
+
+	cluster := newCluster(t, clusterParent, clusterIntermediate, namespacedGrandchild).WithAPIResources([]kube.APIResourceInfo{
+		{
+			GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
+			GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
+			Meta:                 metav1.APIResource{Namespaced: false},
+		},
+		{
+			GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
+			GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
+			Meta:                 metav1.APIResource{Namespaced: false},
+		},
+	})
+	err := cluster.EnsureSynced()
+	require.NoError(t, err)
+
+	// Only pass the root cluster-scoped parent as a key (simulating managed resources)
+	// The intermediate and grandchild should be discovered through traversal
+	keys := []kube.ResourceKey{}
+	cluster.IterateHierarchyV2(
+		[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(clusterParent))},
+		func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, resource.ResourceKey())
+			return true
+		},
+	)
+
+	// Should find all 3 levels: parent, intermediate, AND the namespaced grandchild
+	expected := []kube.ResourceKey{
+		kube.GetResourceKey(mustToUnstructured(clusterParent)),
+		kube.GetResourceKey(mustToUnstructured(clusterIntermediate)),
+		kube.GetResourceKey(mustToUnstructured(namespacedGrandchild)), // This is the bug - currently NOT found
+	}
+	assert.ElementsMatch(t, expected, keys)
+}
+
 func TestIterateHierarchyV2_ClusterScopedParentOnly_InferredUID(t *testing.T) {
 	// Test that passing only a cluster-scoped parent finds children even with inferred UIDs.
@@ -1912,6 +2004,118 @@ func BenchmarkIterateHierarchyV2_ClusterParentTraversal(b *testing.B) {
 	}
 }
 
+// BenchmarkIterateHierarchyV2_MultiLevelClusterScoped tests the performance of
+// multi-level cluster-scoped hierarchies: ClusterScoped -> ClusterScoped -> Namespaced
+func BenchmarkIterateHierarchyV2_MultiLevelClusterScoped(b *testing.B) {
+	testCases := []struct {
+		name                    string
+		intermediateChildren    int // Number of intermediate cluster-scoped children per root
+		namespacedGrandchildren int // Number of namespaced grandchildren per intermediate
+		totalNamespaces         int
+	}{
+		// Baseline: no multi-level hierarchy
+		{"NoMultiLevel", 0, 0, 10},
+		// Typical Crossplane scenario: 1 ProviderRevision per Provider, few Deployments
+		{"1Intermediate_5Grandchildren", 1, 5, 10},
+		// Multiple ProviderRevisions per Provider
+		{"5Intermediate_5Grandchildren", 5, 5, 10},
+		// Larger hierarchy
+		{"10Intermediate_10Grandchildren", 10, 10, 20},
+		// Stress test
+		{"20Intermediate_20Grandchildren", 20, 20, 50},
+	}
+
+	for _, tc := range testCases {
+		b.Run(tc.name, func(b *testing.B) {
+			cluster := newCluster(b).WithAPIResources([]kube.APIResourceInfo{{
+				GroupKind:            schema.GroupKind{Group: "", Kind: "Namespace"},
+				GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
+				Meta:                 metav1.APIResource{Namespaced: false},
+			}, {
+				GroupKind:            schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
+				GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
+				Meta:                 metav1.APIResource{Namespaced: false},
+			}, {
+				GroupKind:            schema.GroupKind{Group: "", Kind: "Pod"},
+				GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
+				Meta:                 metav1.APIResource{Namespaced: true},
+			}})
+
+			cluster.namespacedResources = map[schema.GroupKind]bool{
+				{Group: "", Kind: "Pod"}:                                  true,
+				{Group: "", Kind: "Namespace"}:                            false,
+				{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: false,
+			}
+
+			// Create root cluster-scoped parent (Namespace, simulating Provider)
+			rootUID := uuid.New().String()
+			rootYaml := fmt.Sprintf(`
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: root-parent
+  uid: %s`, rootUID)
+			rootKey := kube.ResourceKey{Kind: "Namespace", Name: "root-parent"}
+			cluster.setNode(cacheTest.newResource(strToUnstructured(rootYaml)))
+
+			// Create intermediate cluster-scoped children (ClusterRoles, simulating ProviderRevisions)
+			intermediateUIDs := make([]string, tc.intermediateChildren)
+			for i := 0; i < tc.intermediateChildren; i++ {
+				uid := uuid.New().String()
+				intermediateUIDs[i] = uid
+				name := fmt.Sprintf("intermediate-%d", i)
+				intermediateYaml := fmt.Sprintf(`
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: %s
+  uid: %s
+  ownerReferences:
+  - apiVersion: v1
+    kind: Namespace
+    name: root-parent
+    uid: %s
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get"]`, name, uid, rootUID)
+				cluster.setNode(cacheTest.newResource(strToUnstructured(intermediateYaml)))
+			}
+
+			// Create namespaced grandchildren (Pods, simulating Deployments)
+			for i := 0; i < tc.intermediateChildren; i++ {
+				for j := 0; j < tc.namespacedGrandchildren; j++ {
+					nsIdx := (i*tc.namespacedGrandchildren + j) % tc.totalNamespaces
+					namespace := fmt.Sprintf("ns-%d", nsIdx)
+					podName := fmt.Sprintf("grandchild-%d-%d", i, j)
+					podUID := uuid.New().String()
+					podYaml := fmt.Sprintf(`
+apiVersion: v1
+kind: Pod
+metadata:
+  name: %s
+  namespace: %s
+  uid: %s
+  ownerReferences:
+  - apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    name: intermediate-%d
+    uid: %s`, podName, namespace, podUID, i, intermediateUIDs[i])
+					cluster.setNode(cacheTest.newResource(strToUnstructured(podYaml)))
+				}
+			}
+
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			for n := 0; n < b.N; n++ {
+				cluster.IterateHierarchyV2([]kube.ResourceKey{rootKey}, func(_ *Resource, _ map[kube.ResourceKey]*Resource) bool {
+					return true
+				})
+			}
+		})
+	}
+}
+
 func TestIterateHierarchyV2_NoDuplicatesInSameNamespace(t *testing.T) {
 	// Create a parent-child relationship in the same namespace
```
```diff
@@ -17,10 +17,13 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/util/csaupgrade"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2/textlogger"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -1110,7 +1113,9 @@ func (sc *syncContext) shouldUseServerSideApply(targetObj *unstructured.Unstruct
 }
 
 // needsClientSideApplyMigration checks if a resource has fields managed by the specified manager
-// that need to be migrated to the server-side apply manager
+// with operation "Update" (client-side apply) that need to be migrated to server-side apply.
+// Client-side apply uses operation "Update", while server-side apply uses operation "Apply".
+// We only migrate managers with "Update" operation to avoid re-migrating already-migrated managers.
 func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstructured, fieldManager string) bool {
 	if liveObj == nil || fieldManager == "" {
 		return false
@@ -1122,7 +1127,9 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
 	}
 
 	for _, field := range managedFields {
-		if field.Manager == fieldManager {
+		// Only consider managers with operation "Update" (client-side apply).
+		// Managers with operation "Apply" are already using server-side apply.
+		if field.Manager == fieldManager && field.Operation == metav1.ManagedFieldsOperationUpdate {
 			return true
 		}
 	}
@@ -1130,29 +1137,70 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
 	return false
 }
 
-// performClientSideApplyMigration performs a client-side-apply using the specified field manager.
-// This moves the 'last-applied-configuration' field to be managed by the specified manager.
-// The next time server-side apply is performed, kubernetes automatically migrates all fields from the manager
-// that owns 'last-applied-configuration' to the manager that uses server-side apply. This will remove the
-// specified manager from the resources managed fields. 'kubectl-client-side-apply' is used as the default manager.
-func (sc *syncContext) performClientSideApplyMigration(targetObj *unstructured.Unstructured, fieldManager string) error {
-	sc.log.WithValues("resource", kubeutil.GetResourceKey(targetObj)).V(1).Info("Performing client-side apply migration step")
+// performCSAUpgradeMigration uses the csaupgrade package to migrate managed fields
+// from a client-side apply manager (operation: Update) to the server-side apply manager.
+// This directly patches the managedFields to transfer field ownership, avoiding the need
+// to write the last-applied-configuration annotation (which has a 262KB size limit).
+// This is the primary method for CSA to SSA migration in ArgoCD.
+func (sc *syncContext) performCSAUpgradeMigration(liveObj *unstructured.Unstructured, csaFieldManager string) error {
+	sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
+		"Performing csaupgrade-based migration")
 
-	// Apply with the specified manager to set up the migration
-	_, err := sc.resourceOps.ApplyResource(
-		context.TODO(),
-		targetObj,
-		cmdutil.DryRunNone,
-		false,
-		false,
-		false,
-		fieldManager,
-	)
+	// Get the dynamic resource interface for the live object
+	gvk := liveObj.GroupVersionKind()
+	apiResource, err := kubeutil.ServerResourceForGroupVersionKind(sc.disco, gvk, "patch")
 	if err != nil {
-		return fmt.Errorf("failed to perform client-side apply migration on manager %s: %w", fieldManager, err)
+		return fmt.Errorf("failed to get api resource for %s: %w", gvk, err)
 	}
+	res := kubeutil.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
+	resIf := kubeutil.ToResourceInterface(sc.dynamicIf, apiResource, res, liveObj.GetNamespace())
 
-	return nil
+	// Use retry to handle conflicts if managed fields changed between reconciliation and now
+	//nolint:wrapcheck // error is wrapped inside the retry function
+	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		// Fetch fresh object to get current managed fields state
+		freshObj, getErr := resIf.Get(context.TODO(), liveObj.GetName(), metav1.GetOptions{})
+		if getErr != nil {
+			return fmt.Errorf("failed to get fresh object for CSA migration: %w", getErr)
+		}
+
+		// Check if migration is still needed with fresh state
+		if !sc.needsClientSideApplyMigration(freshObj, csaFieldManager) {
+			sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
+				"CSA migration no longer needed")
+			return nil
+		}
+
+		// Generate the migration patch using the csaupgrade package
+		// This unions the CSA manager's fields into the SSA manager and removes the CSA manager entry
+		patchData, patchErr := csaupgrade.UpgradeManagedFieldsPatch(
+			freshObj,
+			sets.New(csaFieldManager),
+			sc.serverSideApplyManager,
+		)
+		if patchErr != nil {
+			return fmt.Errorf("failed to generate csaupgrade migration patch: %w", patchErr)
+		}
+		if patchData == nil {
+			// No migration needed
+			return nil
+		}
+
+		// Apply the migration patch to transfer field ownership.
+		_, patchErr = resIf.Patch(context.TODO(), liveObj.GetName(), types.JSONPatchType, patchData, metav1.PatchOptions{})
+		if patchErr != nil {
+			if apierrors.IsConflict(patchErr) {
+				sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
+					"Retrying CSA migration due to conflict")
+			}
+			// Return the error unmodified so RetryOnConflict can identify conflicts correctly.
+			return patchErr
+		}
+
+		sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
+			"Successfully migrated managed fields using csaupgrade")
+		return nil
+	})
 }
 
 func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.ResultCode, string) {
@@ -1173,11 +1221,14 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.R
 	serverSideApply := sc.shouldUseServerSideApply(t.targetObj, dryRun)
 
-	// Check if we need to perform client-side apply migration for server-side apply
+	// Perform client-side apply migration for server-side apply
+	// This uses csaupgrade to directly patch managedFields, transferring ownership
+	// from CSA managers (operation: Update) to the SSA manager (argocd-controller)
 	if serverSideApply && !dryRun && sc.enableClientSideApplyMigration {
 		if sc.needsClientSideApplyMigration(t.liveObj, sc.clientSideApplyMigrationManager) {
-			err = sc.performClientSideApplyMigration(t.targetObj, sc.clientSideApplyMigrationManager)
+			err = sc.performCSAUpgradeMigration(t.liveObj, sc.clientSideApplyMigrationManager)
 			if err != nil {
-				return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration: %v", err)
+				return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration for %s: %v", kubeutil.GetResourceKey(t.liveObj), err)
 			}
 		}
 	}
```
```diff
@@ -2417,6 +2417,21 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
 			}(),
 			expected: true,
 		},
+		{
+			name: "CSA manager with Apply operation should not need migration",
+			liveObj: func() *unstructured.Unstructured {
+				obj := testingutils.NewPod()
+				obj.SetManagedFields([]metav1.ManagedFieldsEntry{
+					{
+						Manager:   "kubectl-client-side-apply",
+						Operation: metav1.ManagedFieldsOperationApply,
+						FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{}}}`)},
+					},
+				})
+				return obj
+			}(),
+			expected: false,
+		},
 	}
 
 	for _, tt := range tests {
@@ -2427,6 +2442,129 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
 	}
 }
 
+func TestPerformCSAUpgradeMigration_NoMigrationNeeded(t *testing.T) {
+	// Create a fake dynamic client with a Pod scheme
+	scheme := runtime.NewScheme()
+	_ = corev1.AddToScheme(scheme)
+
+	// Object with only SSA manager (operation: Apply), no CSA manager (operation: Update)
+	obj := testingutils.NewPod()
+	obj.SetNamespace(testingutils.FakeArgoCDNamespace)
+	obj.SetManagedFields([]metav1.ManagedFieldsEntry{
+		{
+			Manager:   "argocd-controller",
+			Operation: metav1.ManagedFieldsOperationApply,
+			FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:containers":{}}}`)},
+		},
+	})
+
+	// Create fake dynamic client with the object
+	dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)
+
+	syncCtx := newTestSyncCtx(nil)
+	syncCtx.serverSideApplyManager = "argocd-controller"
+	syncCtx.dynamicIf = dynamicClient
+	syncCtx.disco = &fakedisco.FakeDiscovery{
+		Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
+	}
+
+	// Should return nil (no error) because there's no CSA manager to migrate
+	err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
+	assert.NoError(t, err)
+}
+
+func TestPerformCSAUpgradeMigration_WithCSAManager(t *testing.T) {
+	// Create a fake dynamic client with a Pod scheme
+	scheme := runtime.NewScheme()
+	_ = corev1.AddToScheme(scheme)
+
+	// Create the live object with a CSA manager (operation: Update)
+	obj := testingutils.NewPod()
+	obj.SetNamespace(testingutils.FakeArgoCDNamespace)
+	obj.SetManagedFields([]metav1.ManagedFieldsEntry{
+		{
+			Manager:   "kubectl-client-side-apply",
+			Operation: metav1.ManagedFieldsOperationUpdate,
+			FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
+		},
+	})
+
+	// Create fake dynamic client with the object
+	dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)
+
+	syncCtx := newTestSyncCtx(nil)
+	syncCtx.serverSideApplyManager = "argocd-controller"
+	syncCtx.dynamicIf = dynamicClient
+	syncCtx.disco = &fakedisco.FakeDiscovery{
+		Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
+	}
+
+	// Perform the migration
+	err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
+	assert.NoError(t, err)
+
+	// Get the updated object from the fake client
+	gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
+	updatedObj, err := dynamicClient.Resource(gvr).Namespace(obj.GetNamespace()).Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
+	require.NoError(t, err)
+
+	// Verify the CSA manager (operation: Update) no longer exists
+	managedFields := updatedObj.GetManagedFields()
+	for _, mf := range managedFields {
+		if mf.Manager == "kubectl-client-side-apply" && mf.Operation == metav1.ManagedFieldsOperationUpdate {
+			t.Errorf("CSA manager 'kubectl-client-side-apply' with operation Update should have been removed, but still exists")
+		}
+	}
+}
+
+func TestPerformCSAUpgradeMigration_ConflictRetry(t *testing.T) {
+	// This test verifies that when a 409 Conflict occurs on the patch because
+	// another actor modified the object between Get and Patch, changing the resourceVersion,
+	// the retry.RetryOnConflict loop retries and eventually succeeds.
+	scheme := runtime.NewScheme()
+	_ = corev1.AddToScheme(scheme)
+
+	obj := testingutils.NewPod()
+	obj.SetNamespace(testingutils.FakeArgoCDNamespace)
+	obj.SetManagedFields([]metav1.ManagedFieldsEntry{
+		{
+			Manager:   "kubectl-client-side-apply",
+			Operation: metav1.ManagedFieldsOperationUpdate,
+			FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
+		},
+	})
+
+	dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)
+
+	// Simulate a conflict on the first patch attempt, where another
+	// controller modified the object between our Get and Patch, bumping the resourceVersion.
+	// The second attempt should succeed.
+	patchAttempt := 0
+	dynamicClient.PrependReactor("patch", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
+		patchAttempt++
+		if patchAttempt == 1 {
+			// First attempt: simulate 409 Conflict (resourceVersion mismatch)
+			return true, nil, apierrors.NewConflict(
+				schema.GroupResource{Group: "", Resource: "pods"},
+				obj.GetName(),
+				errors.New("the object has been modified; please apply your changes to the latest version"),
+			)
+		}
+		return false, nil, nil
+	})
+
+	syncCtx := newTestSyncCtx(nil)
+	syncCtx.serverSideApplyManager = "argocd-controller"
+	syncCtx.dynamicIf = dynamicClient
+	syncCtx.disco = &fakedisco.FakeDiscovery{
+		Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
+	}
+
+	err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
+	assert.NoError(t, err, "Migration should succeed after retrying on conflict")
+	assert.Equal(t, 2, patchAttempt, "Expected exactly 2 patch attempts (1 conflict + 1 success)")
+}
+
 func diffResultListClusterResource() *diff.DiffResultList {
 	ns1 := testingutils.NewNamespace()
 	ns1.SetName("ns-1")
```
```diff
@@ -13,6 +13,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/managedfields"
+	"k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/rest"
@@ -349,7 +350,15 @@ func (k *KubectlCmd) GetServerVersion(config *rest.Config) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("failed to get server version: %w", err)
 	}
-	return fmt.Sprintf("%s.%s", v.Major, v.Minor), nil
+
+	ver, err := version.ParseGeneric(v.GitVersion)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse server version: %w", err)
+	}
+	// ParseGeneric removes the leading "v" and any vendor-specific suffix (e.g. "-gke.100", "-eks-123", "+k3s1").
+	// Helm expects a semver-like Kubernetes version with a "v" prefix for capability checks, so we normalize the
+	// version to "v<major>.<minor>.<patch>".
+	return "v" + ver.String(), nil
 }
 
 func (k *KubectlCmd) NewDynamicClient(config *rest.Config) (dynamic.Interface, error) {
```
```diff
@@ -4,10 +4,14 @@ import (
 	_ "embed"
+	"encoding/json"
 	"fmt"
+	"net/http"
+	"net/http/httptest"
 	"testing"
 
 	openapi_v2 "github.com/google/gnostic-models/openapiv2"
 	"github.com/stretchr/testify/require"
+	"k8s.io/apimachinery/pkg/version"
 	"k8s.io/client-go/rest"
 
 	"github.com/stretchr/testify/assert"
 	"k8s.io/klog/v2/textlogger"
@@ -69,6 +73,80 @@ func TestConvertToVersion(t *testing.T) {
 	})
 }
 
+func TestGetServerVersion(t *testing.T) {
+	t.Run("returns full semantic version with patch", func(t *testing.T) {
+		fakeServer := fakeHTTPServer(version.Info{
+			Major:      "1",
+			Minor:      "34",
+			GitVersion: "v1.34.0",
+			GitCommit:  "abc123def456",
+			Platform:   "linux/amd64",
+		}, nil)
+		defer fakeServer.Close()
+		config := mockConfig(fakeServer.URL)
+
+		serverVersion, err := kubectlCmd().GetServerVersion(config)
+		require.NoError(t, err)
+		assert.Equal(t, "v1.34.0", serverVersion, "Should return full semantic serverVersion")
+		assert.Regexp(t, `^v\d+\.\d+\.\d+`, serverVersion, "Should match semver pattern with 'v' prefix")
+		assert.NotEqual(t, "1.34", serverVersion, "Should not be old Major.Minor format")
+	})
+
+	t.Run("does not preserve build metadata", func(t *testing.T) {
+		fakeServer := fakeHTTPServer(version.Info{
+			Major:      "1",
+			Minor:      "30",
+			GitVersion: "v1.30.11+IKS",
+			GitCommit:  "xyz789",
+			Platform:   "linux/amd64",
+		}, nil)
+		defer fakeServer.Close()
+		config := mockConfig(fakeServer.URL)
+
+		serverVersion, err := kubectlCmd().GetServerVersion(config)
+		require.NoError(t, err)
+		assert.Equal(t, "v1.30.11", serverVersion, "Should not preserve build metadata")
+		assert.NotContains(t, serverVersion, "+IKS", "Should not contain provider-specific metadata")
+		assert.NotEqual(t, "1.30", serverVersion, "Should not strip to Major.Minor")
+	})
+
+	t.Run("handles error from discovery client", func(t *testing.T) {
+		fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusInternalServerError)
+		}))
+		defer fakeServer.Close()
+		config := mockConfig(fakeServer.URL)
+
+		_, err := kubectlCmd().GetServerVersion(config)
+		assert.Error(t, err, "Should return error when server fails")
+		assert.Contains(t, err.Error(), "failed to get server version",
+			"Error should indicate version retrieval failure")
+	})
+
+	t.Run("handles minor version with plus suffix", func(t *testing.T) {
+		fakeServer := fakeHTTPServer(version.Info{
+			Major:      "1",
+			Minor:      "30+",
+			GitVersion: "v1.30.0",
+		}, nil)
+		defer fakeServer.Close()
+		config := mockConfig(fakeServer.URL)
+		serverVersion, err := kubectlCmd().GetServerVersion(config)
+		require.NoError(t, err)
+
+		assert.Equal(t, "v1.30.0", serverVersion)
+		assert.NotContains(t, serverVersion, "+", "Should not contain the '+' from Minor field")
+	})
+}
+
+func kubectlCmd() *KubectlCmd {
+	kubectl := &KubectlCmd{
+		Log:    textlogger.NewLogger(textlogger.NewConfig()),
+		Tracer: tracing.NopTracer{},
+	}
+	return kubectl
+}
+
 /**
 Getting the test data here was challenging.
 
@@ -108,3 +186,21 @@ func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) {
 	}
 	return document, nil
 }
+
+func mockConfig(host string) *rest.Config {
+	return &rest.Config{
+		Host: host,
+	}
+}
+
+func fakeHTTPServer(info version.Info, err error) *httptest.Server {
+	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/version" {
+			versionInfo := info
+			w.Header().Set("Content-Type", "application/json")
+			_ = json.NewEncoder(w).Encode(versionInfo)
+			return
+		}
+		http.NotFound(w, r)
+	}))
+}
```
```diff
@@ -45,6 +45,10 @@ fi
 # if the tag has not been declared, and we are on a release branch, use the VERSION file.
 if [ "$IMAGE_TAG" = "" ]; then
   branch=$(git rev-parse --abbrev-ref HEAD)
+  # In GitHub Actions PRs, HEAD is detached; use GITHUB_BASE_REF (the target branch) instead
+  if [ "$branch" = "HEAD" ] && [ -n "${GITHUB_BASE_REF:-}" ]; then
+    branch="$GITHUB_BASE_REF"
+  fi
  if [[ $branch = release-* ]]; then
    pwd
    IMAGE_TAG=v$(cat "$SRCROOT/VERSION")
```
```diff
@@ -12,4 +12,4 @@ resources:
 images:
   - name: quay.io/argoproj/argocd
     newName: quay.io/argoproj/argocd
-    newTag: v3.3.1
+    newTag: v3.3.3
```

```diff
@@ -5,7 +5,7 @@ kind: Kustomization
 images:
   - name: quay.io/argoproj/argocd
     newName: quay.io/argoproj/argocd
-    newTag: v3.3.1
+    newTag: v3.3.3
 resources:
   - ./application-controller
   - ./dex
```
manifests/core-install-with-hydrator.yaml (generated, 12 lines changed)
```diff
@@ -31273,7 +31273,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -31408,7 +31408,7 @@ spec:
               key: log.format.timestamp
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -31536,7 +31536,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -31833,7 +31833,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -31886,7 +31886,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -32234,7 +32234,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
manifests/core-install.yaml (generated, 10 lines changed)
```diff
@@ -31241,7 +31241,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -31370,7 +31370,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -31667,7 +31667,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -31720,7 +31720,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -32068,7 +32068,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
```diff
@@ -12,4 +12,4 @@ resources:
 images:
   - name: quay.io/argoproj/argocd
     newName: quay.io/argoproj/argocd
-    newTag: v3.3.1
+    newTag: v3.3.3
```

```diff
@@ -12,7 +12,7 @@ patches:
 images:
   - name: quay.io/argoproj/argocd
     newName: quay.io/argoproj/argocd
-    newTag: v3.3.1
+    newTag: v3.3.3
 resources:
   - ../../base/application-controller
   - ../../base/applicationset-controller
```
manifests/ha/install-with-hydrator.yaml (generated, 18 lines changed)
```diff
@@ -32639,7 +32639,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -32774,7 +32774,7 @@ spec:
               key: log.format.timestamp
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -32925,7 +32925,7 @@ spec:
         - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: copyutil
         securityContext:
@@ -33021,7 +33021,7 @@ spec:
               key: notificationscontroller.repo.server.plaintext
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           tcpSocket:
@@ -33145,7 +33145,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -33468,7 +33468,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -33521,7 +33521,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -33895,7 +33895,7 @@ spec:
               key: server.sync.replace.allowed
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           httpGet:
@@ -34279,7 +34279,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
manifests/ha/install.yaml (generated, 16 lines changed)
```diff
@@ -32609,7 +32609,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -32761,7 +32761,7 @@ spec:
         - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: copyutil
         securityContext:
@@ -32857,7 +32857,7 @@ spec:
               key: notificationscontroller.repo.server.plaintext
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           tcpSocket:
@@ -32981,7 +32981,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -33304,7 +33304,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -33357,7 +33357,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -33731,7 +33731,7 @@ spec:
               key: server.sync.replace.allowed
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           httpGet:
@@ -34115,7 +34115,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
manifests/ha/namespace-install-with-hydrator.yaml (generated, 18 lines changed)
```diff
@@ -1897,7 +1897,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -2032,7 +2032,7 @@ spec:
               key: log.format.timestamp
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -2183,7 +2183,7 @@ spec:
         - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: copyutil
         securityContext:
@@ -2279,7 +2279,7 @@ spec:
               key: notificationscontroller.repo.server.plaintext
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           tcpSocket:
@@ -2403,7 +2403,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -2726,7 +2726,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -2779,7 +2779,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -3153,7 +3153,7 @@ spec:
               key: server.sync.replace.allowed
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           httpGet:
@@ -3537,7 +3537,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
manifests/ha/namespace-install.yaml (generated, 16 lines changed)
```diff
@@ -1867,7 +1867,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -2019,7 +2019,7 @@ spec:
         - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: copyutil
         securityContext:
@@ -2115,7 +2115,7 @@ spec:
               key: notificationscontroller.repo.server.plaintext
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           tcpSocket:
@@ -2239,7 +2239,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -2562,7 +2562,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -2615,7 +2615,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -2989,7 +2989,7 @@ spec:
               key: server.sync.replace.allowed
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           httpGet:
@@ -3373,7 +3373,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
manifests/install-with-hydrator.yaml (generated, 18 lines changed)
```diff
@@ -31717,7 +31717,7 @@ spec:
               key: applicationsetcontroller.status.max.resources.count
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-applicationset-controller
         ports:
@@ -31852,7 +31852,7 @@ spec:
               key: log.format.timestamp
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -32003,7 +32003,7 @@ spec:
         - -n
        - /usr/local/bin/argocd
        - /shared/argocd-dex
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: copyutil
         securityContext:
@@ -32099,7 +32099,7 @@ spec:
               key: notificationscontroller.repo.server.plaintext
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           tcpSocket:
@@ -32201,7 +32201,7 @@ spec:
         - argocd
        - admin
        - redis-initial-password
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: IfNotPresent
         name: secret-init
         securityContext:
@@ -32498,7 +32498,7 @@ spec:
           value: /helm-working-dir
         - name: HELM_DATA_HOME
           value: /helm-working-dir
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           failureThreshold: 3
@@ -32551,7 +32551,7 @@ spec:
         command:
         - sh
        - -c
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         name: copyutil
         securityContext:
           allowPrivilegeEscalation: false
@@ -32923,7 +32923,7 @@ spec:
               key: server.sync.replace.allowed
               name: argocd-cmd-params-cm
               optional: true
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         livenessProbe:
           httpGet:
@@ -33307,7 +33307,7 @@ spec:
           optional: true
         - name: KUBECACHEDIR
           value: /tmp/kubecache
-        image: quay.io/argoproj/argocd:v3.3.1
+        image: quay.io/argoproj/argocd:v3.3.3
         imagePullPolicy: Always
         name: argocd-application-controller
         ports:
```
manifests/install.yaml (generated, 16 lines changed)
@@ -31685,7 +31685,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31837,7 +31837,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -31933,7 +31933,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  tcpSocket:
@@ -32035,7 +32035,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -32332,7 +32332,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
  value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  failureThreshold: 3
@@ -32385,7 +32385,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
name: copyutil
securityContext:
  allowPrivilegeEscalation: false
@@ -32757,7 +32757,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  httpGet:
@@ -33141,7 +33141,7 @@ spec:
optional: true
- name: KUBECACHEDIR
  value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: argocd-application-controller
ports:
manifests/namespace-install-with-hydrator.yaml (18 changes, generated)
@@ -975,7 +975,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1110,7 +1110,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  failureThreshold: 3
@@ -1261,7 +1261,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1357,7 +1357,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  tcpSocket:
@@ -1459,7 +1459,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1756,7 +1756,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
  value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  failureThreshold: 3
@@ -1809,7 +1809,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
name: copyutil
securityContext:
  allowPrivilegeEscalation: false
@@ -2181,7 +2181,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  httpGet:
@@ -2565,7 +2565,7 @@ spec:
optional: true
- name: KUBECACHEDIR
  value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: argocd-application-controller
ports:
manifests/namespace-install.yaml (16 changes, generated)
@@ -943,7 +943,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1095,7 +1095,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1191,7 +1191,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  tcpSocket:
@@ -1293,7 +1293,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1590,7 +1590,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
  value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  failureThreshold: 3
@@ -1643,7 +1643,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
name: copyutil
securityContext:
  allowPrivilegeEscalation: false
@@ -2015,7 +2015,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
livenessProbe:
  httpGet:
@@ -2399,7 +2399,7 @@ spec:
optional: true
- name: KUBECACHEDIR
  value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.1
image: quay.io/argoproj/argocd:v3.3.3
imagePullPolicy: Always
name: argocd-application-controller
ports:
@@ -1,12 +1,18 @@
local actions = {}

-- https://github.com/cloudnative-pg/cloudnative-pg/tree/main/internal/cmd/plugin/restart
actions["restart"] = {
    ["iconClass"] = "fa fa-fw fa-recycle",
    ["displayName"] = "Rollout restart Cluster"
}

-- https://github.com/cloudnative-pg/cloudnative-pg/tree/main/internal/cmd/plugin/reload
actions["reload"] = {
    ["iconClass"] = "fa fa-fw fa-rotate-right",
    ["displayName"] = "Reload all Configuration"
}

-- https://github.com/cloudnative-pg/cloudnative-pg/tree/main/internal/cmd/plugin/promote
actions["promote"] = {
    ["iconClass"] = "fa fa-fw fa-angles-up",
    ["displayName"] = "Promote Replica to Primary",
@@ -19,9 +25,10 @@ actions["promote"] = {
    }
}

-- Check if reconciliation is currently suspended
-- Suspend reconciliation loop for a cluster
-- https://cloudnative-pg.io/docs/1.28/failure_modes/#disabling-reconciliation
local isSuspended = false
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliation"] == "disabled" then
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliationLoop"] == "disabled" then
    isSuspended = true
end

@@ -6,5 +6,5 @@ if obj.metadata.annotations == nil then
    obj.metadata.annotations = {}
end

obj.metadata.annotations["cnpg.io/reconciliation"] = nil
obj.metadata.annotations["cnpg.io/reconciliationLoop"] = nil
return obj

@@ -6,5 +6,5 @@ if obj.metadata.annotations == nil then
    obj.metadata.annotations = {}
end

obj.metadata.annotations["cnpg.io/reconciliation"] = "disabled"
obj.metadata.annotations["cnpg.io/reconciliationLoop"] = "disabled"
return obj

@@ -33,7 +33,7 @@ function hibernating(obj)
end

-- Check if reconciliation is suspended, since this is an explicit user action we return the "suspended" status immediately
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliation"] == "disabled" then
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliationLoop"] == "disabled" then
    hs.status = "Suspended"
    hs.message = "Cluster reconciliation is suspended"
    return hs
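Note: the annotation rename above (cnpg.io/reconciliation to cnpg.io/reconciliationLoop) is what the suspend/resume actions write and what the Lua health script reads. A minimal Go sketch of the same check, not part of the commits; the helper name is made up:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// isReconciliationSuspended mirrors the Lua check: reconciliation is
// considered suspended when the cnpg.io/reconciliationLoop annotation
// is set to "disabled". (Hypothetical helper, for illustration only.)
func isReconciliationSuspended(obj *unstructured.Unstructured) bool {
	// GetAnnotations returns nil when no annotations exist; indexing a
	// nil map is safe in Go and yields the empty string.
	return obj.GetAnnotations()["cnpg.io/reconciliationLoop"] == "disabled"
}

func main() {
	obj := &unstructured.Unstructured{}
	obj.SetAnnotations(map[string]string{"cnpg.io/reconciliationLoop": "disabled"})
	fmt.Println(isReconciliationSuspended(obj)) // true
}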
@@ -2,7 +2,7 @@ apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  annotations:
    cnpg.io/reconciliation: "disabled"
    cnpg.io/reconciliationLoop: "disabled"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"postgresql.cnpg.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"imageName":"ghcr.io/cloudnative-pg/postgresql:13","instances":3,"storage":{"size":"1Gi"}}}
  creationTimestamp: "2025-04-25T20:44:24Z"
@@ -56,6 +56,17 @@ if not obj.status.environments or #obj.status.environments == 0 then
    return hs
end

-- Use note.drySha as canonical proposed SHA when present; fallback to proposed.dry.sha.
local function getProposedDrySha(env)
    if env and env.proposed and env.proposed.note and env.proposed.note.drySha and env.proposed.note.drySha ~= "" then
        return env.proposed.note.drySha
    end
    if env and env.proposed and env.proposed.dry and env.proposed.dry.sha and env.proposed.dry.sha ~= "" then
        return env.proposed.dry.sha
    end
    return nil
end

-- Make sure there's a fully-populated status for both active and proposed commits in all environments. If anything is
-- missing or empty, return a Progressing status.
for _, env in ipairs(obj.status.environments) do
@@ -64,7 +75,7 @@ for _, env in ipairs(obj.status.environments) do
        hs.message = "The active commit DRY SHA is missing or empty in environment '" .. env.branch .. "'."
        return hs
    end
    if not env.proposed or not env.proposed.dry or not env.proposed.dry.sha or env.proposed.dry.sha == "" then
    if not getProposedDrySha(env) then
        hs.status = "Progressing"
        hs.message = "The proposed commit DRY SHA is missing or empty in environment '" .. env.branch .. "'."
        return hs
@@ -72,9 +83,9 @@ for _, env in ipairs(obj.status.environments) do
end

-- Check if all the proposed environments have the same proposed commit dry sha. If not, return a Progressing status.
local proposedSha = obj.status.environments[1].proposed.dry.sha -- Don't panic, Lua is 1-indexed.
local proposedSha = getProposedDrySha(obj.status.environments[1]) -- Don't panic, Lua is 1-indexed.
for _, env in ipairs(obj.status.environments) do
    if env.proposed.dry.sha ~= proposedSha then
    if getProposedDrySha(env) ~= proposedSha then
        hs.status = "Progressing"
        hs.message = "Not all environments have the same proposed commit SHA. This likely means the hydrator has not run for all environments yet."
        return hs
@@ -96,7 +107,8 @@ end
-- statuses and build a summary about how many are pending, successful, or failed. Return a Progressing status for this
-- in-progress environment.
for _, env in ipairs(obj.status.environments) do
    if env.proposed.dry.sha ~= env.active.dry.sha then
    local envProposedSha = getProposedDrySha(env)
    if envProposedSha ~= env.active.dry.sha then
        local pendingCount = 0
        local successCount = 0
        local failureCount = 0
@@ -121,7 +133,7 @@ for _, env in ipairs(obj.status.environments) do
        hs.message =
            "Promotion in progress for environment '" .. env.branch ..
            "' from '" .. getShortSha(env.active.dry.sha) ..
            "' to '" .. getShortSha(env.proposed.dry.sha) .. "': " ..
            "' to '" .. getShortSha(envProposedSha) .. "': " ..
            pendingCount .. " pending, " .. successCount .. " successful, " .. failureCount .. " failed. "

        if pendingCount > 0 then
@@ -172,5 +184,5 @@ end
-- If all environments have the same proposed commit dry sha as the active one, we can consider the promotion strategy
-- healthy. This means all environments are in sync and no further action is needed.
hs.status = "Healthy"
hs.message = "All environments are up-to-date on commit '" .. getShortSha(obj.status.environments[1].proposed.dry.sha) .. "'."
hs.message = "All environments are up-to-date on commit '" .. getShortSha(getProposedDrySha(obj.status.environments[1])) .. "'."
return hs
@@ -47,3 +47,7 @@ tests:
    status: Degraded
    message: "Promotion strategy reconciliation failed (ChangeTransferPolicyNotReady): ChangeTransferPolicy \"strategy-environments-qal-usw2-27894e05\" is not Ready because \"ReconciliationError\": Reconciliation failed: failed to calculate ChangeTransferPolicy status: failed to get SHAs for proposed branch \"environments/qal-usw2-next\": exit status 128: fatal: 'origin/environments/qal-usw2-next' is not a commit and a branch 'environments/qal-usw2-next' cannot be created from it"
    inputPath: testdata/missing-sha-and-not-ready.yaml
  - healthStatus:
      status: Progressing
      message: "Promotion in progress for environment 'dev' from 'abc1234' to 'new9999': 0 pending, 0 successful, 0 failed. "
    inputPath: testdata/proposed-note-dry-sha-preferred.yaml
@@ -0,0 +1,30 @@
apiVersion: promoter.argoproj.io/v1alpha1
kind: PromotionStrategy
metadata:
  generation: 1
spec: {}
status:
  conditions:
    - type: Ready
      status: "True"
      observedGeneration: 1
  environments:
    - branch: dev
      active:
        dry:
          sha: abc1234abcdef0
      proposed:
        dry:
          sha: old1111abcdef0
        note:
          drySha: new9999abcdef0
    - branch: prod
      active:
        dry:
          sha: abc1234abcdef0
      proposed:
        dry:
          sha: old2222abcdef0
        note:
          drySha: new9999abcdef0
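Note: the testdata above exercises the new precedence: when the hydrator writes a note, note.drySha wins over proposed.dry.sha. A small Go sketch of that fallback, not part of the commits; the struct and function names are hypothetical:

package main

import "fmt"

// Proposed mirrors the two places a proposed DRY SHA can appear in the
// PromotionStrategy status (field names here are illustrative only).
type Proposed struct {
	DrySha     string // corresponds to proposed.dry.sha
	NoteDrySha string // corresponds to proposed.note.drySha
}

// proposedDrySha prefers the hydrator note's SHA when present and falls
// back to the proposed dry SHA, matching getProposedDrySha in the diff.
func proposedDrySha(p Proposed) string {
	if p.NoteDrySha != "" {
		return p.NoteDrySha
	}
	return p.DrySha
}

func main() {
	// Mirrors testdata/proposed-note-dry-sha-preferred.yaml: the note SHA wins.
	fmt.Println(proposedDrySha(Proposed{DrySha: "old1111abcdef0", NoteDrySha: "new9999abcdef0"})) // new9999abcdef0
	fmt.Println(proposedDrySha(Proposed{DrySha: "old2222abcdef0"}))                               // old2222abcdef0
}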
@@ -23,7 +23,7 @@ func TestClusterList(t *testing.T) {

	last := ""
	expected := fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
https://kubernetes.default.svc in-cluster %v Successful `, fixture.GetVersions(t).ServerVersion)
https://kubernetes.default.svc in-cluster %v Successful `, fixture.GetVersions(t).ServerVersion.String())

	ctx := clusterFixture.Given(t)
	ctx.Project(fixture.ProjectName)
@@ -64,7 +64,7 @@ func TestClusterAdd(t *testing.T) {
		List().
		Then().
		AndCLIOutput(func(output string, _ error) {
			assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
			assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion.String(), fixture.ProjectName))
		})
}

@@ -119,7 +119,7 @@ func TestClusterAddAllowed(t *testing.T) {
		List().
		Then().
		AndCLIOutput(func(output string, _ error) {
			assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
			assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion.String(), fixture.ProjectName))
		})
}

@@ -175,7 +175,7 @@ func TestClusterGet(t *testing.T) {

	assert.Contains(t, output, "name: in-cluster")
	assert.Contains(t, output, "server: https://kubernetes.default.svc")
	assert.Contains(t, output, fmt.Sprintf(`serverVersion: "%v"`, fixture.GetVersions(t).ServerVersion))
	assert.Contains(t, output, fmt.Sprintf(`serverVersion: %v`, fixture.GetVersions(t).ServerVersion.String()))
	assert.Contains(t, output, `config:
    tlsClientConfig:
      insecure: false`)
@@ -10,6 +10,7 @@ import (
	. "github.com/argoproj/gitops-engine/pkg/sync/common"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/util/version"

	. "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
@@ -163,7 +164,7 @@ func TestCustomToolWithEnv(t *testing.T) {
			assert.Equal(t, "bar", output)
		}).
		And(func(_ *Application) {
			expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
			expectedKubeVersion := version.MustParseGeneric(fixture.GetVersions(t).ServerVersion.GitVersion).String()
			output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
			require.NoError(t, err)
			assert.Equal(t, expectedKubeVersion, output)
@@ -273,7 +274,7 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
			assert.Equal(t, "baz", output)
		}).
		And(func(_ *Application) {
			expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
			expectedKubeVersion := version.MustParseGeneric(fixture.GetVersions(t).ServerVersion.GitVersion).String()
			output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
			require.NoError(t, err)
			assert.Equal(t, expectedKubeVersion, output)
@@ -2,13 +2,13 @@ package fixture

import (
	"encoding/json"
	"fmt"
	"strings"
	"testing"

	"github.com/argoproj/gitops-engine/pkg/cache"
	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/util/version"

	"github.com/argoproj/argo-cd/v3/util/argo"
	"github.com/argoproj/argo-cd/v3/util/errors"
@@ -20,23 +20,19 @@ type Versions struct {
}

type Version struct {
	Major, Minor string
	Major, Minor, GitVersion string
}

func (v Version) String() string {
	return v.Format("%s.%s")
}

func (v Version) Format(format string) string {
	return fmt.Sprintf(format, v.Major, v.Minor)
	return "v" + version.MustParseGeneric(v.GitVersion).String()
}

func GetVersions(t *testing.T) *Versions {
	t.Helper()
	output := errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "version", "-o", "json")).(string)
	version := &Versions{}
	require.NoError(t, json.Unmarshal([]byte(output), version))
	return version
	versions := &Versions{}
	require.NoError(t, json.Unmarshal([]byte(output), versions))
	return versions
}

func GetApiResources(t *testing.T) string { //nolint:revive //FIXME(var-naming)
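Note: the fixture now derives the printable version from kubectl's reported GitVersion via k8s.io/apimachinery's generic version parser instead of formatting Major and Minor by hand. A standalone sketch of that call, assuming a made-up sample GitVersion string:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// ParseGeneric tolerates a leading "v" and trailing build metadata,
	// and String() renders the bare major.minor.patch, which is why the
	// fixture re-prefixes "v". The sample value below is hypothetical.
	v := version.MustParseGeneric("v1.31.2+k3s1")
	fmt.Println("v" + v.String()) // v1.31.2
}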
@@ -356,7 +356,7 @@ func TestKubeVersion(t *testing.T) {
			kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
				"-o", "jsonpath={.data.kubeVersion}")).(string)
			// Capabilities.KubeVersion defaults to 1.9.0, we assume here you are running a later version
			assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.Format("v%s.%s"), kubeVersion)
			assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.String(), kubeVersion)
		}).
		When().
		// Make sure override works.

@@ -306,8 +306,7 @@ func TestKustomizeKubeVersion(t *testing.T) {
		And(func(_ *Application) {
			kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
				"-o", "jsonpath={.data.kubeVersion}")).(string)
			// Capabilities.KubeVersion defaults to 1.9.0, we assume here you are running a later version
			assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.Format("v%s.%s"), kubeVersion)
			assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.String(), kubeVersion)
		}).
		When().
		// Make sure override works.
ui/src/app/applications/components/resource-icon.test.tsx (137 changes, Normal file)
@@ -0,0 +1,137 @@
|
||||
import * as React from 'react';
|
||||
import * as renderer from 'react-test-renderer';
|
||||
import {ResourceIcon} from './resource-icon';
|
||||
|
||||
// Mock the resourceIcons and resourceCustomizations
|
||||
jest.mock('./resources', () => ({
|
||||
resourceIcons: new Map([
|
||||
['Ingress', 'ing'],
|
||||
['ConfigMap', 'cm'],
|
||||
['Deployment', 'deploy'],
|
||||
['Service', 'svc']
|
||||
])
|
||||
}));
|
||||
|
||||
jest.mock('./resource-customizations', () => ({
|
||||
resourceIconGroups: {
|
||||
'*.crossplane.io': true,
|
||||
'*.fluxcd.io': true,
|
||||
'cert-manager.io': true
|
||||
}
|
||||
}));
|
||||
|
||||
describe('ResourceIcon', () => {
|
||||
describe('kind-based icons (no group)', () => {
|
||||
it('should show kind-based icon for ConfigMap without group', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='' kind='ConfigMap' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/cm.svg');
|
||||
});
|
||||
|
||||
it('should show kind-based icon for Deployment without group', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='' kind='Deployment' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/deploy.svg');
|
||||
});
|
||||
});
|
||||
|
||||
describe('group-based icons (with matching group)', () => {
|
||||
it('should show group-based icon for exact group match', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='cert-manager.io' kind='Certificate' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/cert-manager.io/icon.svg');
|
||||
});
|
||||
|
||||
it('should show group-based icon for wildcard group match (crossplane)', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='pkg.crossplane.io' kind='Provider' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
// Wildcard '*' should be replaced with '_' in the path
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/_.crossplane.io/icon.svg');
|
||||
|
||||
const complexTestRenderer = renderer.create(<ResourceIcon group='identify.provider.crossplane.io' kind='Provider' />);
|
||||
const complexTestInstance = complexTestRenderer.root;
|
||||
const complexImgs = complexTestInstance.findAllByType('img');
|
||||
expect(complexImgs.length).toBeGreaterThan(0);
|
||||
// Wildcard '*' should be replaced with '_' in the path
|
||||
expect(complexImgs[0].props.src).toBe('assets/images/resources/_.crossplane.io/icon.svg');
|
||||
});
|
||||
|
||||
it('should show group-based icon for wildcard group match (fluxcd)', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='source.fluxcd.io' kind='GitRepository' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/_.fluxcd.io/icon.svg');
|
||||
});
|
||||
});
|
||||
|
||||
describe('fallback to kind-based icons (with non-matching group) - THIS IS THE BUG FIX', () => {
|
||||
it('should fallback to kind-based icon for Ingress with networking.k8s.io group', () => {
|
||||
// This is the main bug fix test case
|
||||
// Ingress has group 'networking.k8s.io' which is NOT in resourceCustomizations
|
||||
// But Ingress IS in resourceIcons, so it should still show the icon
|
||||
const testRenderer = renderer.create(<ResourceIcon group='networking.k8s.io' kind='Ingress' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/ing.svg');
|
||||
});
|
||||
|
||||
it('should fallback to kind-based icon for Service with core group', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='' kind='Service' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/resources/svc.svg');
|
||||
});
|
||||
});
|
||||
|
||||
describe('fallback to initials (no matching group or kind)', () => {
|
||||
it('should show initials for unknown resource with unknown group', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='unknown.example.io' kind='UnknownResource' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBe(0);
|
||||
// Should show initials "UR" (uppercase letters from UnknownResource)
|
||||
const spans = testInstance.findAllByType('span');
|
||||
const textSpan = spans.find(s => s.children.includes('UR'));
|
||||
expect(textSpan).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should show initials for MyCustomKind', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='' kind='MyCustomKind' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBe(0);
|
||||
// Should show initials "MCK"
|
||||
const spans = testInstance.findAllByType('span');
|
||||
const textSpan = spans.find(s => s.children.includes('MCK'));
|
||||
expect(textSpan).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe('special cases', () => {
|
||||
it('should show node icon for kind=node', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='' kind='node' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const imgs = testInstance.findAllByType('img');
|
||||
expect(imgs.length).toBeGreaterThan(0);
|
||||
expect(imgs[0].props.src).toBe('assets/images/infrastructure_components/node.svg');
|
||||
});
|
||||
|
||||
it('should show application icon for kind=Application', () => {
|
||||
const testRenderer = renderer.create(<ResourceIcon group='' kind='Application' />);
|
||||
const testInstance = testRenderer.root;
|
||||
const icons = testInstance.findAll(node => node.type === 'i' && typeof node.props.className === 'string' && node.props.className.includes('argo-icon-application'));
|
||||
expect(icons.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -10,17 +10,18 @@ export const ResourceIcon = ({group, kind, customStyle}: {group: string; kind: s
    if (kind === 'Application') {
        return <i title={kind} className={`icon argo-icon-application`} style={customStyle} />;
    }
    if (!group) {
        const i = resourceIcons.get(kind);
        if (i !== undefined) {
            return <img src={'assets/images/resources/' + i + '.svg'} alt={kind} style={{padding: '2px', width: '40px', height: '32px', ...customStyle}} />;
        }
    } else {
    // First, check for group-based custom icons
    if (group) {
        const matchedGroup = matchGroupToResource(group);
        if (matchedGroup) {
            return <img src={`assets/images/resources/${matchedGroup}/icon.svg`} alt={kind} style={{paddingBottom: '2px', width: '40px', height: '32px', ...customStyle}} />;
        }
    }
    // Fallback to kind-based icons (works for both empty group and non-matching groups)
    const i = resourceIcons.get(kind);
    if (i !== undefined) {
        return <img src={'assets/images/resources/' + i + '.svg'} alt={kind} style={{padding: '2px', width: '40px', height: '32px', ...customStyle}} />;
    }
    const initials = kind.replace(/[a-z]/g, '');
    const n = initials.length;
    const style: React.CSSProperties = {
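Note: the fix above tries a group-based icon first and then falls back to kind-based icons, so kinds like Ingress under networking.k8s.io still resolve. matchGroupToResource itself is not shown in this diff; the Go sketch below only illustrates one plausible wildcard lookup consistent with the tests ('*' maps to '_' in the icon path):

package main

import (
	"fmt"
	"strings"
)

// resourceIconGroups mirrors the mocked lookup table from the tests above.
var resourceIconGroups = map[string]bool{
	"*.crossplane.io": true,
	"*.fluxcd.io":     true,
	"cert-manager.io": true,
}

// matchGroup returns the icon directory for a group: exact keys match
// as-is, and '*.suffix' keys match any group ending in '.suffix', with
// '*' rewritten to '_' in the returned path segment. (Hypothetical
// stand-in for the UI's matchGroupToResource, for illustration only.)
func matchGroup(group string) (string, bool) {
	if resourceIconGroups[group] {
		return group, true
	}
	for pattern := range resourceIconGroups {
		if suffix, ok := strings.CutPrefix(pattern, "*."); ok && strings.HasSuffix(group, "."+suffix) {
			return strings.Replace(pattern, "*", "_", 1), true
		}
	}
	return "", false
}

func main() {
	if dir, ok := matchGroup("pkg.crossplane.io"); ok {
		fmt.Println("assets/images/resources/" + dir + "/icon.svg") // .../_.crossplane.io/icon.svg
	}
}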
@@ -16,6 +16,7 @@ import (
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/utils/ptr"

@@ -37,8 +38,13 @@ func (db *db) getLocalCluster() *appv1.Cluster {
	initLocalCluster.Do(func() {
		info, err := db.kubeclientset.Discovery().ServerVersion()
		if err == nil {
			//nolint:staticcheck
			localCluster.ServerVersion = fmt.Sprintf("%s.%s", info.Major, info.Minor)
			ver, verErr := version.ParseGeneric(info.GitVersion)
			if verErr == nil {
				//nolint:staticcheck
				localCluster.ServerVersion = ver.String()
			} else {
				log.Warnf("Failed to parse Kubernetes server version: %v", verErr)
			}
			//nolint:staticcheck
			localCluster.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}
		} else {
@@ -83,7 +83,7 @@ func (t testNormalizer) Normalize(un *unstructured.Unstructured) error {
		}
	case "postgresql.cnpg.io":
		if un.GetKind() == "Cluster" {
			if err := unstructured.SetNestedStringMap(un.Object, map[string]string{"cnpg.io/reloadedAt": "0001-01-01T00:00:00Z", "kubectl.kubernetes.io/restartedAt": "0001-01-01T00:00:00Z"}, "metadata", "annotations"); err != nil {
			if err := setPgClusterAnnotations(un); err != nil {
				return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
			}
			if err := unstructured.SetNestedField(un.Object, nil, "status", "targetPrimaryTimestamp"); err != nil {
@@ -136,6 +136,22 @@ func setFluxRequestedAtAnnotation(un *unstructured.Unstructured) error {
	return unstructured.SetNestedStringMap(un.Object, map[string]string{"reconcile.fluxcd.io/requestedAt": "By Argo CD at: 0001-01-01T00:00:00"}, "metadata", "annotations")
}

// Helper: normalize PostgreSQL CNPG Cluster annotations while preserving existing ones
func setPgClusterAnnotations(un *unstructured.Unstructured) error {
	// Get existing annotations or create an empty map
	existingAnnotations, _, _ := unstructured.NestedStringMap(un.Object, "metadata", "annotations")
	if existingAnnotations == nil {
		existingAnnotations = make(map[string]string)
	}

	// Update only the specific keys
	existingAnnotations["cnpg.io/reloadedAt"] = "0001-01-01T00:00:00Z"
	existingAnnotations["kubectl.kubernetes.io/restartedAt"] = "0001-01-01T00:00:00Z"

	// Set the updated annotations back
	return unstructured.SetNestedStringMap(un.Object, existingAnnotations, "metadata", "annotations")
}

func (t testNormalizer) normalizeJob(un *unstructured.Unstructured) error {
	if conditions, exist, err := unstructured.NestedSlice(un.Object, "status", "conditions"); err != nil {
		return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)