Compare commits

..

13 Commits

Author SHA1 Message Date
github-actions[bot]
34ccdfc3d5 Bump version to 3.3.4 on release-3.3 branch (#26854)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2026-03-16 12:51:36 +02:00
Regina Voloshin
01b86e7900 docs: clarify cluster version change impact for ClusterGenerator, CMP Plugins and migration (#26851)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
2026-03-16 12:21:24 +02:00
argo-cd-cherry-pick-bot[bot]
182e4c62b2 fix(ci): Add missing git-lfs installer checksum for ppc64le (cherry-pick #26835 for 3.3) (#26836)
Signed-off-by: Oliver Gondža <ogondza@gmail.com>
Co-authored-by: Oliver Gondža <ogondza@gmail.com>
2026-03-14 18:25:32 +02:00
Blake Pettersson
e164f8c50b chore: bump otel-sdk (release-3.3) (#26808)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
2026-03-12 15:34:12 +02:00
Soumya Ghosh Dastidar
2fcc40a0fc fix: skip token refresh threshold parsing in unrelated components (cherry-pick 3.3) (#26806)
Signed-off-by: Soumya Ghosh Dastidar <gdsoumya@gmail.com>
2026-03-12 01:18:14 -10:00
github-actions[bot]
ff239dcd20 Bump version to 3.3.3 on release-3.3 branch (#26752)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2026-03-09 11:25:28 -04:00
argo-cd-cherry-pick-bot[bot]
4411801980 fix(health): use note.drySha when available (cherry-pick #26698 for 3.3) (#26750)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2026-03-09 10:46:46 -04:00
Papapetrou Patroklos
c6df35db8e fix: consistency of kubeversion with helm version 3 3 (#26744)
Signed-off-by: Patroklos Papapetrou <ppapapetrou76@gmail.com>
2026-03-09 13:36:46 +02:00
argo-cd-cherry-pick-bot[bot]
6224d6787e fix(actions): Use correct annotation for CNPG suspend/resume (cherry-pick #26711 for 3.3) (#26727)
Signed-off-by: Rouke Broersma <rouke.broersma@infosupport.com>
Co-authored-by: Rouke Broersma <rouke.broersma@infosupport.com>
2026-03-08 16:40:27 +02:00
Alexandre Gaudreault
5e190219c9 fix: multi-level cross-namespace hierarchy traversal for cluster-scop… (#26640)
Signed-off-by: Jonathan Ogilvie <jonathan.ogilvie@sumologic.com>
Signed-off-by: Jonathan Ogilvie <679297+jcogilvie@users.noreply.github.com>
Co-authored-by: Jonathan Ogilvie <679297+jcogilvie@users.noreply.github.com>
2026-03-04 13:51:40 -05:00
argo-cd-cherry-pick-bot[bot]
968c6338a7 fix(controller): handle comma-separated hook annotations for PreDelete/PostDelete hooks (cherry-pick #26420 for 3.3) (#26586)
Signed-off-by: linghaoSu <linghao.su@daocloud.io>
Co-authored-by: Linghao Su <linghao.su@daocloud.io>
2026-02-24 00:37:40 -10:00
argo-cd-cherry-pick-bot[bot]
3d3760f4b4 fix(ui): standard resource icons are not displayed properly.#26216 (cherry-pick #26228 for 3.3) (#26380)
Signed-off-by: linghaoSu <linghao.su@daocloud.io>
Co-authored-by: Linghao Su <linghao.su@daocloud.io>
2026-02-24 17:29:26 +09:00
argo-cd-cherry-pick-bot[bot]
c61c5931ce chore: use base ref for cherry-pick prs (cherry-pick #26551 for 3.3) (#26553)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
2026-02-23 01:06:39 +01:00
49 changed files with 795 additions and 160 deletions

View File

@@ -1 +1 @@
3.3.2
3.3.4

View File

@@ -72,7 +72,7 @@ func Test_loadClusters(t *testing.T) {
ConnectionState: v1alpha1.ConnectionState{
Status: "Successful",
},
ServerVersion: ".",
ServerVersion: "0.0.0",
Shard: ptr.To(int64(0)),
},
Namespaces: []string{"test"},

View File

@@ -3,6 +3,7 @@ package controller
import (
"context"
"fmt"
"slices"
"strings"
"github.com/argoproj/gitops-engine/pkg/health"
@@ -43,8 +44,12 @@ func isHookOfType(obj *unstructured.Unstructured, hookType HookType) bool {
}
for k, v := range hookTypeAnnotations[hookType] {
if val, ok := obj.GetAnnotations()[k]; ok && val == v {
return true
if val, ok := obj.GetAnnotations()[k]; ok {
if slices.ContainsFunc(strings.Split(val, ","), func(item string) bool {
return strings.TrimSpace(item) == v
}) {
return true
}
}
}
return false

View File

@@ -127,6 +127,16 @@ func TestIsPreDeleteHook(t *testing.T) {
annot: map[string]string{"argocd.argoproj.io/hook": "PostDelete"},
expected: false,
},
{
name: "Helm PreDelete & PreDelete hook",
annot: map[string]string{"helm.sh/hook": "pre-delete,post-delete"},
expected: true,
},
{
name: "ArgoCD PostDelete & PreDelete hook",
annot: map[string]string{"argocd.argoproj.io/hook": "PostDelete,PreDelete"},
expected: true,
},
}
for _, tt := range tests {
@@ -160,6 +170,16 @@ func TestIsPostDeleteHook(t *testing.T) {
annot: map[string]string{"argocd.argoproj.io/hook": "PreDelete"},
expected: false,
},
{
name: "ArgoCD PostDelete & PreDelete hook",
annot: map[string]string{"argocd.argoproj.io/hook": "PostDelete,PreDelete"},
expected: true,
},
{
name: "Helm PostDelete & PreDelete hook",
annot: map[string]string{"helm.sh/hook": "post-delete,pre-delete"},
expected: true,
},
}
for _, tt := range tests {
@@ -171,3 +191,38 @@ func TestIsPostDeleteHook(t *testing.T) {
})
}
}
func TestMultiHookOfType(t *testing.T) {
tests := []struct {
name string
hookType []HookType
annot map[string]string
expected bool
}{
{
name: "helm PreDelete & PostDelete hook",
hookType: []HookType{PreDeleteHookType, PostDeleteHookType},
annot: map[string]string{"helm.sh/hook": "pre-delete,post-delete"},
expected: true,
},
{
name: "ArgoCD PreDelete & PostDelete hook",
hookType: []HookType{PreDeleteHookType, PostDeleteHookType},
annot: map[string]string{"argocd.argoproj.io/hook": "PreDelete,PostDelete"},
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj := &unstructured.Unstructured{}
obj.SetAnnotations(tt.annot)
for _, hookType := range tt.hookType {
result := isHookOfType(obj, hookType)
assert.Equal(t, tt.expected, result)
}
})
}
}

View File

@@ -152,14 +152,14 @@ spec:
- clusters:
selector:
matchLabels:
argocd.argoproj.io/kubernetes-version: 1.28
argocd.argoproj.io/kubernetes-version: v1.28.1
# matchExpressions are also supported.
#matchExpressions:
# - key: argocd.argoproj.io/kubernetes-version
# operator: In
# values:
# - "1.27"
# - "1.28"
# - "v1.27.1"
# - "v1.28.1"
```
### Pass additional key-value pairs via `values` field

View File

@@ -73,6 +73,26 @@ This design change improves repository cleanliness, reduces unnecessary commit n
Review your automation workflows and repository maintenance scripts to ensure that old or unwanted files in application paths are cleaned up if necessary. Consider implementing a periodic manual or automated cleanup procedure if your use case requires it.
- For more details on current behavior, see the [Source Hydrator user guide](../../user-guide/source-hydrator.md).
### Cluster version format change
**New behavior:**
3.3.3 now stores the cluster version in a more detailed format, `vMajor.Minor.Patch` compared to the previous format `Major.Minor`.
This change aligns how Argo CD interprets the Kubernetes cluster version with how Helm `3.19.0` and above interprets it.
This change makes it easier to compare versions and to support future features. It also allows for more accurate version comparisons and better compatibility with future Kubernetes releases.
**Impact:**
Application Sets with Cluster Generators, that fetch clusters based on their Kubernetes version and use `argocd.argoproj.io/auto-label-cluster-info` on the cluster secret, need to be updated to use `argocd.argoproj.io/kubernetes-version` with the `vMajor.Minor.Patch` format instead of the previous `Major.Minor` format.
More details [here](../applicationset/Generators-Cluster.md#fetch-clusters-based-on-their-k8s-version).
Additionally, API, UI and CLI commands that retrieve cluster information now return the version in the `vMajor.Minor.Patch` format.
The env variable $KUBE_VERSION that is used with Argo CD CMP Plugins remains unchanged and returns the version in `Major.Minor.Patch` format, so CMP Plugins are not impacted.
### Anonymous call to Settings API returns fewer fields
The Settings API now returns less information when accessed anonymously.
@@ -92,8 +112,7 @@ removed in a future release.
## Helm Upgraded to 3.19.4
Argo CD v3.3 upgrades the bundled Helm version to 3.19.4. There are no breaking changes in Helm 3.19.4 according to the
[release notes](https://github.com/helm/helm/releases/tag/v3.19.0).
Argo CD v3.3 upgrades the bundled Helm version to 3.19.4. This Helm release interprets K8s version in a semantic version format of `vMajor.Minor.Patch`, instead of the previous `vMajor.Minor` format. This led to a breaking change in Argo CD described [above](#cluster-version-format-change).
## Kustomize Upgraded to 5.8.1
@@ -118,3 +137,11 @@ If you rely on Helm charts within kustomization files, please review the details
* [services.cloud.sap.com/ServiceBinding](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
* [services.cloud.sap.com/ServiceInstance](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
* [\_.cnrm.cloud.google.com/\_](https://github.com/argoproj/argo-cd/commit/30abebda3d930d93065eec8864aac7e0d56ae119)
## More detailed cluster version
3.3.3 now stores the cluster version in a more detailed format, Major.Minor.Patch compared to the previous format Major.Minor.
This change is to make it easier to compare versions and to support future features.
This change also allows for more accurate version comparisons and better compatibility with future Kubernetes releases.
Users will notice it in the UI and the CLI commands that retrieve cluster information.

View File

@@ -1213,7 +1213,9 @@ func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(r
}
// processCrossNamespaceChildren processes namespaced children of cluster-scoped resources
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries
// This enables traversing from cluster-scoped parents to their namespaced children across namespace boundaries.
// It also handles multi-level hierarchies where cluster-scoped resources own other cluster-scoped resources
// that in turn own namespaced resources (e.g., Provider -> ProviderRevision -> Deployment in Crossplane).
func (c *clusterCache) processCrossNamespaceChildren(
clusterScopedKeys []kube.ResourceKey,
visited map[kube.ResourceKey]int,
@@ -1230,7 +1232,21 @@ func (c *clusterCache) processCrossNamespaceChildren(
childKeys := c.parentUIDToChildren[clusterResource.Ref.UID]
for _, childKey := range childKeys {
child := c.resources[childKey]
if child == nil || visited[childKey] != 0 {
if child == nil {
continue
}
alreadyVisited := visited[childKey] != 0
// If child is cluster-scoped and was already visited by processNamespaceHierarchy,
// we still need to recursively check for its cross-namespace children.
// This handles multi-level hierarchies like: ClusterScoped -> ClusterScoped -> Namespaced
// (e.g., Crossplane's Provider -> ProviderRevision -> Deployment)
if alreadyVisited {
if childKey.Namespace == "" {
// Recursively process cross-namespace children of this cluster-scoped child
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
}
continue
}
@@ -1245,6 +1261,12 @@ func (c *clusterCache) processCrossNamespaceChildren(
visited[childKey] = 1
// Recursively process descendants using index-based traversal
c.iterateChildrenUsingIndex(child, nsNodes, visited, action)
// If this child is also cluster-scoped, recursively process its cross-namespace children
if childKey.Namespace == "" {
c.processCrossNamespaceChildren([]kube.ResourceKey{childKey}, visited, action)
}
visited[childKey] = 2
}
}

View File

@@ -1350,6 +1350,98 @@ func TestIterateHierarchyV2_ClusterScopedParent_FindsAllChildren(t *testing.T) {
assert.ElementsMatch(t, expected, keys)
}
func TestIterateHierarchyV2_MultiLevelClusterScoped_FindsNamespacedGrandchildren(t *testing.T) {
// Test 3-level hierarchy: ClusterScoped -> ClusterScoped -> Namespaced
// This tests the scenario where:
// Provider (managed) -> ProviderRevision (dynamic) -> Deployment (namespaced)
// The namespaced grandchildren should be found even when only the root is passed as a key.
// Level 1: Cluster-scoped parent (like Provider - this is the "managed" resource)
clusterParent := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: "root-cluster-parent",
UID: "root-parent-uid",
ResourceVersion: "1",
},
}
// Level 2: Cluster-scoped intermediate (like ProviderRevision - dynamically created, NOT managed)
clusterIntermediate := &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: "intermediate-cluster-child",
UID: "intermediate-uid",
ResourceVersion: "1",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Namespace",
Name: "root-cluster-parent",
UID: "root-parent-uid",
}},
},
}
// Level 3: Namespaced grandchild (like Deployment owned by ProviderRevision)
namespacedGrandchild := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: "namespaced-grandchild",
Namespace: "some-namespace",
UID: "grandchild-uid",
ResourceVersion: "1",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
Name: "intermediate-cluster-child",
UID: "intermediate-uid",
}},
},
}
cluster := newCluster(t, clusterParent, clusterIntermediate, namespacedGrandchild).WithAPIResources([]kube.APIResourceInfo{
{
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
Meta: metav1.APIResource{Namespaced: false},
},
{
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
Meta: metav1.APIResource{Namespaced: false},
},
})
err := cluster.EnsureSynced()
require.NoError(t, err)
// Only pass the root cluster-scoped parent as a key (simulating managed resources)
// The intermediate and grandchild should be discovered through traversal
keys := []kube.ResourceKey{}
cluster.IterateHierarchyV2(
[]kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(clusterParent))},
func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
keys = append(keys, resource.ResourceKey())
return true
},
)
// Should find all 3 levels: parent, intermediate, AND the namespaced grandchild
expected := []kube.ResourceKey{
kube.GetResourceKey(mustToUnstructured(clusterParent)),
kube.GetResourceKey(mustToUnstructured(clusterIntermediate)),
kube.GetResourceKey(mustToUnstructured(namespacedGrandchild)), // This is the bug - currently NOT found
}
assert.ElementsMatch(t, expected, keys)
}
func TestIterateHierarchyV2_ClusterScopedParentOnly_InferredUID(t *testing.T) {
// Test that passing only a cluster-scoped parent finds children even with inferred UIDs.
@@ -1912,6 +2004,118 @@ func BenchmarkIterateHierarchyV2_ClusterParentTraversal(b *testing.B) {
}
}
// BenchmarkIterateHierarchyV2_MultiLevelClusterScoped tests the performance of
// multi-level cluster-scoped hierarchies: ClusterScoped -> ClusterScoped -> Namespaced
func BenchmarkIterateHierarchyV2_MultiLevelClusterScoped(b *testing.B) {
testCases := []struct {
name string
intermediateChildren int // Number of intermediate cluster-scoped children per root
namespacedGrandchildren int // Number of namespaced grandchildren per intermediate
totalNamespaces int
}{
// Baseline: no multi-level hierarchy
{"NoMultiLevel", 0, 0, 10},
// Typical Crossplane scenario: 1 ProviderRevision per Provider, few Deployments
{"1Intermediate_5Grandchildren", 1, 5, 10},
// Multiple ProviderRevisions per Provider
{"5Intermediate_5Grandchildren", 5, 5, 10},
// Larger hierarchy
{"10Intermediate_10Grandchildren", 10, 10, 20},
// Stress test
{"20Intermediate_20Grandchildren", 20, 20, 50},
}
for _, tc := range testCases {
b.Run(tc.name, func(b *testing.B) {
cluster := newCluster(b).WithAPIResources([]kube.APIResourceInfo{{
GroupKind: schema.GroupKind{Group: "", Kind: "Namespace"},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
Meta: metav1.APIResource{Namespaced: false},
}, {
GroupKind: schema.GroupKind{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
GroupVersionResource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"},
Meta: metav1.APIResource{Namespaced: false},
}, {
GroupKind: schema.GroupKind{Group: "", Kind: "Pod"},
GroupVersionResource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
Meta: metav1.APIResource{Namespaced: true},
}})
cluster.namespacedResources = map[schema.GroupKind]bool{
{Group: "", Kind: "Pod"}: true,
{Group: "", Kind: "Namespace"}: false,
{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: false,
}
// Create root cluster-scoped parent (Namespace, simulating Provider)
rootUID := uuid.New().String()
rootYaml := fmt.Sprintf(`
apiVersion: v1
kind: Namespace
metadata:
name: root-parent
uid: %s`, rootUID)
rootKey := kube.ResourceKey{Kind: "Namespace", Name: "root-parent"}
cluster.setNode(cacheTest.newResource(strToUnstructured(rootYaml)))
// Create intermediate cluster-scoped children (ClusterRoles, simulating ProviderRevisions)
intermediateUIDs := make([]string, tc.intermediateChildren)
for i := 0; i < tc.intermediateChildren; i++ {
uid := uuid.New().String()
intermediateUIDs[i] = uid
name := fmt.Sprintf("intermediate-%d", i)
intermediateYaml := fmt.Sprintf(`
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: %s
uid: %s
ownerReferences:
- apiVersion: v1
kind: Namespace
name: root-parent
uid: %s
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]`, name, uid, rootUID)
cluster.setNode(cacheTest.newResource(strToUnstructured(intermediateYaml)))
}
// Create namespaced grandchildren (Pods, simulating Deployments)
for i := 0; i < tc.intermediateChildren; i++ {
for j := 0; j < tc.namespacedGrandchildren; j++ {
nsIdx := (i*tc.namespacedGrandchildren + j) % tc.totalNamespaces
namespace := fmt.Sprintf("ns-%d", nsIdx)
podName := fmt.Sprintf("grandchild-%d-%d", i, j)
podUID := uuid.New().String()
podYaml := fmt.Sprintf(`
apiVersion: v1
kind: Pod
metadata:
name: %s
namespace: %s
uid: %s
ownerReferences:
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
name: intermediate-%d
uid: %s`, podName, namespace, podUID, i, intermediateUIDs[i])
cluster.setNode(cacheTest.newResource(strToUnstructured(podYaml)))
}
}
b.ResetTimer()
b.ReportAllocs()
for n := 0; n < b.N; n++ {
cluster.IterateHierarchyV2([]kube.ResourceKey{rootKey}, func(_ *Resource, _ map[kube.ResourceKey]*Resource) bool {
return true
})
}
})
}
}
func TestIterateHierarchyV2_NoDuplicatesInSameNamespace(t *testing.T) {
// Create a parent-child relationship in the same namespace

View File

@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/managedfields"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
@@ -349,7 +350,15 @@ func (k *KubectlCmd) GetServerVersion(config *rest.Config) (string, error) {
if err != nil {
return "", fmt.Errorf("failed to get server version: %w", err)
}
return fmt.Sprintf("%s.%s", v.Major, v.Minor), nil
ver, err := version.ParseGeneric(v.GitVersion)
if err != nil {
return "", fmt.Errorf("failed to parse server version: %w", err)
}
// ParseGeneric removes the leading "v" and any vendor-specific suffix (e.g. "-gke.100", "-eks-123", "+k3s1").
// Helm expects a semver-like Kubernetes version with a "v" prefix for capability checks, so we normalize the
// version to "v<major>.<minor>.<patch>".
return "v" + ver.String(), nil
}
func (k *KubectlCmd) NewDynamicClient(config *rest.Config) (dynamic.Interface, error) {

View File

@@ -4,10 +4,14 @@ import (
_ "embed"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
openapi_v2 "github.com/google/gnostic-models/openapiv2"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/rest"
"github.com/stretchr/testify/assert"
"k8s.io/klog/v2/textlogger"
@@ -69,6 +73,80 @@ func TestConvertToVersion(t *testing.T) {
})
}
func TestGetServerVersion(t *testing.T) {
t.Run("returns full semantic version with patch", func(t *testing.T) {
fakeServer := fakeHTTPServer(version.Info{
Major: "1",
Minor: "34",
GitVersion: "v1.34.0",
GitCommit: "abc123def456",
Platform: "linux/amd64",
}, nil)
defer fakeServer.Close()
config := mockConfig(fakeServer.URL)
serverVersion, err := kubectlCmd().GetServerVersion(config)
require.NoError(t, err)
assert.Equal(t, "v1.34.0", serverVersion, "Should return full semantic serverVersion")
assert.Regexp(t, `^v\d+\.\d+\.\d+`, serverVersion, "Should match semver pattern with 'v' prefix")
assert.NotEqual(t, "1.34", serverVersion, "Should not be old Major.Minor format")
})
t.Run("do not preserver build metadata", func(t *testing.T) {
fakeServer := fakeHTTPServer(version.Info{
Major: "1",
Minor: "30",
GitVersion: "v1.30.11+IKS",
GitCommit: "xyz789",
Platform: "linux/amd64",
}, nil)
defer fakeServer.Close()
config := mockConfig(fakeServer.URL)
serverVersion, err := kubectlCmd().GetServerVersion(config)
require.NoError(t, err)
assert.Equal(t, "v1.30.11", serverVersion, "Should not preserve build metadata")
assert.NotContains(t, serverVersion, "+IKS", "Should not contain provider-specific metadata")
assert.NotEqual(t, "1.30", serverVersion, "Should not strip to Major.Minor")
})
t.Run("handles error from discovery client", func(t *testing.T) {
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}))
defer fakeServer.Close()
config := mockConfig(fakeServer.URL)
_, err := kubectlCmd().GetServerVersion(config)
assert.Error(t, err, "Should return error when server fails")
assert.Contains(t, err.Error(), "failed to get server version",
"Error should indicate version retrieval failure")
})
t.Run("handles minor version with plus suffix", func(t *testing.T) {
fakeServer := fakeHTTPServer(version.Info{
Major: "1",
Minor: "30+",
GitVersion: "v1.30.0",
}, nil)
defer fakeServer.Close()
config := mockConfig(fakeServer.URL)
serverVersion, err := kubectlCmd().GetServerVersion(config)
require.NoError(t, err)
assert.Equal(t, "v1.30.0", serverVersion)
assert.NotContains(t, serverVersion, "+", "Should not contain the '+' from Minor field")
})
}
func kubectlCmd() *KubectlCmd {
kubectl := &KubectlCmd{
Log: textlogger.NewLogger(textlogger.NewConfig()),
Tracer: tracing.NopTracer{},
}
return kubectl
}
/**
Getting the test data here was challenging.
@@ -108,3 +186,21 @@ func (f *fakeOpenAPIClient) OpenAPISchema() (*openapi_v2.Document, error) {
}
return document, nil
}
func mockConfig(host string) *rest.Config {
return &rest.Config{
Host: host,
}
}
func fakeHTTPServer(info version.Info, err error) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/version" {
versionInfo := info
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(versionInfo)
return
}
http.NotFound(w, r)
}))
}

10
go.mod
View File

@@ -90,9 +90,9 @@ require (
github.com/yuin/gopher-lua v1.1.1
gitlab.com/gitlab-org/api/client-go v1.8.1
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0
go.opentelemetry.io/otel v1.38.0
go.opentelemetry.io/otel v1.40.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
go.opentelemetry.io/otel/sdk v1.38.0
go.opentelemetry.io/otel/sdk v1.40.0
golang.org/x/crypto v0.46.0
golang.org/x/net v0.48.0
golang.org/x/oauth2 v0.34.0
@@ -275,13 +275,13 @@ require (
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.40.0 // indirect
go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/tools/go/expect v0.1.1-deprecated // indirect

24
go.sum
View File

@@ -940,20 +940,20 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -1222,8 +1222,8 @@ golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b/go.mod h1:4ZwOYna0/zsOKwuR5X/m0QFOJpSZvAxFfkQT+Erd9D4=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

View File

@@ -0,0 +1 @@
100fbefdd86722dafd56737121510289ece9574c7bb8ec01b4633f8892acc427 git-lfs-linux-ppc64le-v3.7.1.tar.gz

View File

@@ -0,0 +1 @@
d4b68db5d7cc34395b8d6c392326aeff98a297bde2053625560df6c76eb97c69 git-lfs-linux-s390x-v3.7.1.tar.gz

View File

@@ -45,6 +45,10 @@ fi
# if the tag has not been declared, and we are on a release branch, use the VERSION file.
if [ "$IMAGE_TAG" = "" ]; then
branch=$(git rev-parse --abbrev-ref HEAD)
# In GitHub Actions PRs, HEAD is detached; use GITHUB_BASE_REF (the target branch) instead
if [ "$branch" = "HEAD" ] && [ -n "${GITHUB_BASE_REF:-}" ]; then
branch="$GITHUB_BASE_REF"
fi
if [[ $branch = release-* ]]; then
pwd
IMAGE_TAG=v$(cat "$SRCROOT/VERSION")

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.2
newTag: v3.3.4

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.2
newTag: v3.3.4
resources:
- ./application-controller
- ./dex

View File

@@ -31273,7 +31273,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31408,7 +31408,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31536,7 +31536,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -31833,7 +31833,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31886,7 +31886,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32234,7 +32234,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -31241,7 +31241,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31370,7 +31370,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -31667,7 +31667,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31720,7 +31720,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32068,7 +32068,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.2
newTag: v3.3.4

View File

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.2
newTag: v3.3.4
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

View File

@@ -32639,7 +32639,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -32774,7 +32774,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32925,7 +32925,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -33021,7 +33021,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -33145,7 +33145,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -33468,7 +33468,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33521,7 +33521,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -33895,7 +33895,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -34279,7 +34279,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -32609,7 +32609,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -32761,7 +32761,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -32857,7 +32857,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -32981,7 +32981,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -33304,7 +33304,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33357,7 +33357,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -33731,7 +33731,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -34115,7 +34115,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1897,7 +1897,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2032,7 +2032,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2183,7 +2183,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2279,7 +2279,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2403,7 +2403,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2726,7 +2726,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2779,7 +2779,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3153,7 +3153,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3537,7 +3537,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1867,7 +1867,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2019,7 +2019,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2115,7 +2115,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2239,7 +2239,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2562,7 +2562,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2615,7 +2615,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2989,7 +2989,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3373,7 +3373,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -31717,7 +31717,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31852,7 +31852,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32003,7 +32003,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -32099,7 +32099,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -32201,7 +32201,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -32498,7 +32498,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32551,7 +32551,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32923,7 +32923,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -33307,7 +33307,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

16
manifests/install.yaml generated
View File

@@ -31685,7 +31685,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31837,7 +31837,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -31933,7 +31933,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -32035,7 +32035,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -32332,7 +32332,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32385,7 +32385,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32757,7 +32757,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -33141,7 +33141,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -975,7 +975,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1110,7 +1110,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1261,7 +1261,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1357,7 +1357,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1459,7 +1459,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1756,7 +1756,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1809,7 +1809,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2181,7 +2181,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2565,7 +2565,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -943,7 +943,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1095,7 +1095,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1191,7 +1191,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1293,7 +1293,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1590,7 +1590,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1643,7 +1643,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2015,7 +2015,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2399,7 +2399,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.2
image: quay.io/argoproj/argocd:v3.3.4
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1,12 +1,18 @@
local actions = {}
-- https://github.com/cloudnative-pg/cloudnative-pg/tree/main/internal/cmd/plugin/restart
actions["restart"] = {
["iconClass"] = "fa fa-fw fa-recycle",
["displayName"] = "Rollout restart Cluster"
}
-- https://github.com/cloudnative-pg/cloudnative-pg/tree/main/internal/cmd/plugin/reload
actions["reload"] = {
["iconClass"] = "fa fa-fw fa-rotate-right",
["displayName"] = "Reload all Configuration"
}
-- https://github.com/cloudnative-pg/cloudnative-pg/tree/main/internal/cmd/plugin/promote
actions["promote"] = {
["iconClass"] = "fa fa-fw fa-angles-up",
["displayName"] = "Promote Replica to Primary",
@@ -19,9 +25,10 @@ actions["promote"] = {
}
}
-- Check if reconciliation is currently suspended
-- Suspend reconciliation loop for a cluster
-- https://cloudnative-pg.io/docs/1.28/failure_modes/#disabling-reconciliation
local isSuspended = false
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliation"] == "disabled" then
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliationLoop"] == "disabled" then
isSuspended = true
end

View File

@@ -6,5 +6,5 @@ if obj.metadata.annotations == nil then
obj.metadata.annotations = {}
end
obj.metadata.annotations["cnpg.io/reconciliation"] = nil
obj.metadata.annotations["cnpg.io/reconciliationLoop"] = nil
return obj

View File

@@ -6,5 +6,5 @@ if obj.metadata.annotations == nil then
obj.metadata.annotations = {}
end
obj.metadata.annotations["cnpg.io/reconciliation"] = "disabled"
obj.metadata.annotations["cnpg.io/reconciliationLoop"] = "disabled"
return obj

View File

@@ -33,7 +33,7 @@ function hibernating(obj)
end
-- Check if reconciliation is suspended, since this is an explicit user action we return the "suspended" status immediately
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliation"] == "disabled" then
if obj.metadata and obj.metadata.annotations and obj.metadata.annotations["cnpg.io/reconciliationLoop"] == "disabled" then
hs.status = "Suspended"
hs.message = "Cluster reconciliation is suspended"
return hs

View File

@@ -2,7 +2,7 @@ apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
annotations:
cnpg.io/reconciliation: "disabled"
cnpg.io/reconciliationLoop: "disabled"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"postgresql.cnpg.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"imageName":"ghcr.io/cloudnative-pg/postgresql:13","instances":3,"storage":{"size":"1Gi"}}}
creationTimestamp: "2025-04-25T20:44:24Z"

View File

@@ -56,6 +56,17 @@ if not obj.status.environments or #obj.status.environments == 0 then
return hs
end
-- Use note.drySha as canonical proposed SHA when present; fallback to proposed.dry.sha.
local function getProposedDrySha(env)
if env and env.proposed and env.proposed.note and env.proposed.note.drySha and env.proposed.note.drySha ~= "" then
return env.proposed.note.drySha
end
if env and env.proposed and env.proposed.dry and env.proposed.dry.sha and env.proposed.dry.sha ~= "" then
return env.proposed.dry.sha
end
return nil
end
-- Make sure there's a fully-populated status for both active and proposed commits in all environments. If anything is
-- missing or empty, return a Progressing status.
for _, env in ipairs(obj.status.environments) do
@@ -64,7 +75,7 @@ for _, env in ipairs(obj.status.environments) do
hs.message = "The active commit DRY SHA is missing or empty in environment '" .. env.branch .. "'."
return hs
end
if not env.proposed or not env.proposed.dry or not env.proposed.dry.sha or env.proposed.dry.sha == "" then
if not getProposedDrySha(env) then
hs.status = "Progressing"
hs.message = "The proposed commit DRY SHA is missing or empty in environment '" .. env.branch .. "'."
return hs
@@ -72,9 +83,9 @@ for _, env in ipairs(obj.status.environments) do
end
-- Check if all the proposed environments have the same proposed commit dry sha. If not, return a Progressing status.
local proposedSha = obj.status.environments[1].proposed.dry.sha -- Don't panic, Lua is 1-indexed.
local proposedSha = getProposedDrySha(obj.status.environments[1]) -- Don't panic, Lua is 1-indexed.
for _, env in ipairs(obj.status.environments) do
if env.proposed.dry.sha ~= proposedSha then
if getProposedDrySha(env) ~= proposedSha then
hs.status = "Progressing"
hs.message = "Not all environments have the same proposed commit SHA. This likely means the hydrator has not run for all environments yet."
return hs
@@ -96,7 +107,8 @@ end
-- statuses and build a summary about how many are pending, successful, or failed. Return a Progressing status for this
-- in-progress environment.
for _, env in ipairs(obj.status.environments) do
if env.proposed.dry.sha ~= env.active.dry.sha then
local envProposedSha = getProposedDrySha(env)
if envProposedSha ~= env.active.dry.sha then
local pendingCount = 0
local successCount = 0
local failureCount = 0
@@ -121,7 +133,7 @@ for _, env in ipairs(obj.status.environments) do
hs.message =
"Promotion in progress for environment '" .. env.branch ..
"' from '" .. getShortSha(env.active.dry.sha) ..
"' to '" .. getShortSha(env.proposed.dry.sha) .. "': " ..
"' to '" .. getShortSha(envProposedSha) .. "': " ..
pendingCount .. " pending, " .. successCount .. " successful, " .. failureCount .. " failed. "
if pendingCount > 0 then
@@ -172,5 +184,5 @@ end
-- If all environments have the same proposed commit dry sha as the active one, we can consider the promotion strategy
-- healthy. This means all environments are in sync and no further action is needed.
hs.status = "Healthy"
hs.message = "All environments are up-to-date on commit '" .. getShortSha(obj.status.environments[1].proposed.dry.sha) .. "'."
hs.message = "All environments are up-to-date on commit '" .. getShortSha(getProposedDrySha(obj.status.environments[1])) .. "'."
return hs

View File

@@ -47,3 +47,7 @@ tests:
status: Degraded
message: "Promotion strategy reconciliation failed (ChangeTransferPolicyNotReady): ChangeTransferPolicy \"strategy-environments-qal-usw2-27894e05\" is not Ready because \"ReconciliationError\": Reconciliation failed: failed to calculate ChangeTransferPolicy status: failed to get SHAs for proposed branch \"environments/qal-usw2-next\": exit status 128: fatal: 'origin/environments/qal-usw2-next' is not a commit and a branch 'environments/qal-usw2-next' cannot be created from it"
inputPath: testdata/missing-sha-and-not-ready.yaml
- healthStatus:
status: Progressing
message: "Promotion in progress for environment 'dev' from 'abc1234' to 'new9999': 0 pending, 0 successful, 0 failed. "
inputPath: testdata/proposed-note-dry-sha-preferred.yaml

View File

@@ -0,0 +1,30 @@
apiVersion: promoter.argoproj.io/v1alpha1
kind: PromotionStrategy
metadata:
generation: 1
spec: {}
status:
conditions:
- type: Ready
status: "True"
observedGeneration: 1
environments:
- branch: dev
active:
dry:
sha: abc1234abcdef0
proposed:
dry:
sha: old1111abcdef0
note:
drySha: new9999abcdef0
- branch: prod
active:
dry:
sha: abc1234abcdef0
proposed:
dry:
sha: old2222abcdef0
note:
drySha: new9999abcdef0

View File

@@ -1570,14 +1570,15 @@ func (server *ArgoCDServer) getClaims(ctx context.Context) (jwt.Claims, string,
}
finalClaims := claims
if server.settings.IsSSOConfigured() {
oidcConfig := server.settings.OIDCConfig()
if oidcConfig != nil || server.settings.IsDexConfigured() {
updatedClaims, err := server.ssoClientApp.SetGroupsFromUserInfo(ctx, claims, util_session.SessionManagerClaimsIssuer)
if err != nil {
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
}
finalClaims = updatedClaims
// OIDC tokens are automatically refreshed here prior to expiration
refreshedToken, err := server.ssoClientApp.CheckAndRefreshToken(ctx, updatedClaims, server.settings.OIDCRefreshTokenThreshold)
refreshedToken, err := server.ssoClientApp.CheckAndRefreshToken(ctx, updatedClaims, server.settings.RefreshTokenThresholdWithConfig(oidcConfig))
if err != nil {
log.Errorf("error checking and refreshing token: %v", err)
}

View File

@@ -23,7 +23,7 @@ func TestClusterList(t *testing.T) {
last := ""
expected := fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
https://kubernetes.default.svc in-cluster %v Successful `, fixture.GetVersions(t).ServerVersion)
https://kubernetes.default.svc in-cluster %v Successful `, fixture.GetVersions(t).ServerVersion.String())
ctx := clusterFixture.Given(t)
ctx.Project(fixture.ProjectName)
@@ -64,7 +64,7 @@ func TestClusterAdd(t *testing.T) {
List().
Then().
AndCLIOutput(func(output string, _ error) {
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion.String(), fixture.ProjectName))
})
}
@@ -119,7 +119,7 @@ func TestClusterAddAllowed(t *testing.T) {
List().
Then().
AndCLIOutput(func(output string, _ error) {
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion.String(), fixture.ProjectName))
})
}
@@ -175,7 +175,7 @@ func TestClusterGet(t *testing.T) {
assert.Contains(t, output, "name: in-cluster")
assert.Contains(t, output, "server: https://kubernetes.default.svc")
assert.Contains(t, output, fmt.Sprintf(`serverVersion: "%v"`, fixture.GetVersions(t).ServerVersion))
assert.Contains(t, output, fmt.Sprintf(`serverVersion: %v`, fixture.GetVersions(t).ServerVersion.String()))
assert.Contains(t, output, `config:
tlsClientConfig:
insecure: false`)

View File

@@ -10,6 +10,7 @@ import (
. "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/version"
. "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
@@ -163,7 +164,7 @@ func TestCustomToolWithEnv(t *testing.T) {
assert.Equal(t, "bar", output)
}).
And(func(_ *Application) {
expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
expectedKubeVersion := version.MustParseGeneric(fixture.GetVersions(t).ServerVersion.GitVersion).String()
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
require.NoError(t, err)
assert.Equal(t, expectedKubeVersion, output)
@@ -273,7 +274,7 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
assert.Equal(t, "baz", output)
}).
And(func(_ *Application) {
expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
expectedKubeVersion := version.MustParseGeneric(fixture.GetVersions(t).ServerVersion.GitVersion).String()
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
require.NoError(t, err)
assert.Equal(t, expectedKubeVersion, output)

View File

@@ -2,13 +2,13 @@ package fixture
import (
"encoding/json"
"fmt"
"strings"
"testing"
"github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/version"
"github.com/argoproj/argo-cd/v3/util/argo"
"github.com/argoproj/argo-cd/v3/util/errors"
@@ -20,23 +20,19 @@ type Versions struct {
}
type Version struct {
Major, Minor string
Major, Minor, GitVersion string
}
func (v Version) String() string {
return v.Format("%s.%s")
}
func (v Version) Format(format string) string {
return fmt.Sprintf(format, v.Major, v.Minor)
return "v" + version.MustParseGeneric(v.GitVersion).String()
}
func GetVersions(t *testing.T) *Versions {
t.Helper()
output := errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "version", "-o", "json")).(string)
version := &Versions{}
require.NoError(t, json.Unmarshal([]byte(output), version))
return version
versions := &Versions{}
require.NoError(t, json.Unmarshal([]byte(output), versions))
return versions
}
func GetApiResources(t *testing.T) string { //nolint:revive //FIXME(var-naming)

View File

@@ -356,7 +356,7 @@ func TestKubeVersion(t *testing.T) {
kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.kubeVersion}")).(string)
// Capabilities.KubeVersion defaults to 1.9.0, we assume here you are running a later version
assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.Format("v%s.%s"), kubeVersion)
assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.String(), kubeVersion)
}).
When().
// Make sure override works.

View File

@@ -306,8 +306,7 @@ func TestKustomizeKubeVersion(t *testing.T) {
And(func(_ *Application) {
kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.kubeVersion}")).(string)
// Capabilities.KubeVersion defaults to 1.9.0, we assume here you are running a later version
assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.Format("v%s.%s"), kubeVersion)
assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.String(), kubeVersion)
}).
When().
// Make sure override works.

View File

@@ -0,0 +1,137 @@
import * as React from 'react';
import * as renderer from 'react-test-renderer';
import {ResourceIcon} from './resource-icon';
// Mock the resourceIcons and resourceCustomizations
jest.mock('./resources', () => ({
resourceIcons: new Map([
['Ingress', 'ing'],
['ConfigMap', 'cm'],
['Deployment', 'deploy'],
['Service', 'svc']
])
}));
jest.mock('./resource-customizations', () => ({
resourceIconGroups: {
'*.crossplane.io': true,
'*.fluxcd.io': true,
'cert-manager.io': true
}
}));
describe('ResourceIcon', () => {
    // Render a ResourceIcon and return the root test instance for inspection.
    const render = (group: string, kind: string) => renderer.create(<ResourceIcon group={group} kind={kind} />).root;
    // The src values of every rendered <img>, in document order.
    const imageSources = (root: renderer.ReactTestInstance) => root.findAllByType('img').map(img => img.props.src);

    describe('kind-based icons (no group)', () => {
        it('should show kind-based icon for ConfigMap without group', () => {
            const sources = imageSources(render('', 'ConfigMap'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/resources/cm.svg');
        });

        it('should show kind-based icon for Deployment without group', () => {
            const sources = imageSources(render('', 'Deployment'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/resources/deploy.svg');
        });
    });

    describe('group-based icons (with matching group)', () => {
        it('should show group-based icon for exact group match', () => {
            const sources = imageSources(render('cert-manager.io', 'Certificate'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/resources/cert-manager.io/icon.svg');
        });

        it('should show group-based icon for wildcard group match (crossplane)', () => {
            // The wildcard '*' of the matched group entry is replaced with '_' in the asset path.
            const simple = imageSources(render('pkg.crossplane.io', 'Provider'));
            expect(simple.length).toBeGreaterThan(0);
            expect(simple[0]).toBe('assets/images/resources/_.crossplane.io/icon.svg');

            // A multi-segment prefix must resolve to the same wildcard entry.
            const nested = imageSources(render('identify.provider.crossplane.io', 'Provider'));
            expect(nested.length).toBeGreaterThan(0);
            expect(nested[0]).toBe('assets/images/resources/_.crossplane.io/icon.svg');
        });

        it('should show group-based icon for wildcard group match (fluxcd)', () => {
            const sources = imageSources(render('source.fluxcd.io', 'GitRepository'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/resources/_.fluxcd.io/icon.svg');
        });
    });

    describe('fallback to kind-based icons (with non-matching group) - THIS IS THE BUG FIX', () => {
        it('should fallback to kind-based icon for Ingress with networking.k8s.io group', () => {
            // Ingress belongs to 'networking.k8s.io', which has no group-based
            // customization, but the kind itself has an icon — it must still render.
            const sources = imageSources(render('networking.k8s.io', 'Ingress'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/resources/ing.svg');
        });

        it('should fallback to kind-based icon for Service with core group', () => {
            const sources = imageSources(render('', 'Service'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/resources/svc.svg');
        });
    });

    describe('fallback to initials (no matching group or kind)', () => {
        // No <img> should be rendered; a <span> must carry the expected initials.
        const expectInitials = (root: renderer.ReactTestInstance, initials: string) => {
            expect(root.findAllByType('img').length).toBe(0);
            const span = root.findAllByType('span').find(s => s.children.includes(initials));
            expect(span).toBeTruthy();
        };

        it('should show initials for unknown resource with unknown group', () => {
            // Initials are the uppercase letters of the kind: UnknownResource -> "UR".
            expectInitials(render('unknown.example.io', 'UnknownResource'), 'UR');
        });

        it('should show initials for MyCustomKind', () => {
            expectInitials(render('', 'MyCustomKind'), 'MCK');
        });
    });

    describe('special cases', () => {
        it('should show node icon for kind=node', () => {
            const sources = imageSources(render('', 'node'));
            expect(sources.length).toBeGreaterThan(0);
            expect(sources[0]).toBe('assets/images/infrastructure_components/node.svg');
        });

        it('should show application icon for kind=Application', () => {
            const root = render('', 'Application');
            const icons = root.findAll(node => node.type === 'i' && typeof node.props.className === 'string' && node.props.className.includes('argo-icon-application'));
            expect(icons.length).toBeGreaterThan(0);
        });
    });
});

View File

@@ -10,17 +10,18 @@ export const ResourceIcon = ({group, kind, customStyle}: {group: string; kind: s
if (kind === 'Application') {
return <i title={kind} className={`icon argo-icon-application`} style={customStyle} />;
}
if (!group) {
const i = resourceIcons.get(kind);
if (i !== undefined) {
return <img src={'assets/images/resources/' + i + '.svg'} alt={kind} style={{padding: '2px', width: '40px', height: '32px', ...customStyle}} />;
}
} else {
// First, check for group-based custom icons
if (group) {
const matchedGroup = matchGroupToResource(group);
if (matchedGroup) {
return <img src={`assets/images/resources/${matchedGroup}/icon.svg`} alt={kind} style={{paddingBottom: '2px', width: '40px', height: '32px', ...customStyle}} />;
}
}
// Fallback to kind-based icons (works for both empty group and non-matching groups)
const i = resourceIcons.get(kind);
if (i !== undefined) {
return <img src={'assets/images/resources/' + i + '.svg'} alt={kind} style={{padding: '2px', width: '40px', height: '32px', ...customStyle}} />;
}
const initials = kind.replace(/[a-z]/g, '');
const n = initials.length;
const style: React.CSSProperties = {

View File

@@ -16,6 +16,7 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/ptr"
@@ -37,8 +38,13 @@ func (db *db) getLocalCluster() *appv1.Cluster {
initLocalCluster.Do(func() {
info, err := db.kubeclientset.Discovery().ServerVersion()
if err == nil {
//nolint:staticcheck
localCluster.ServerVersion = fmt.Sprintf("%s.%s", info.Major, info.Minor)
ver, verErr := version.ParseGeneric(info.GitVersion)
if verErr == nil {
//nolint:staticcheck
localCluster.ServerVersion = ver.String()
} else {
log.Warnf("Failed to parse Kubernetes server version: %v", verErr)
}
//nolint:staticcheck
localCluster.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}
} else {

View File

@@ -83,7 +83,7 @@ func (t testNormalizer) Normalize(un *unstructured.Unstructured) error {
}
case "postgresql.cnpg.io":
if un.GetKind() == "Cluster" {
if err := unstructured.SetNestedStringMap(un.Object, map[string]string{"cnpg.io/reloadedAt": "0001-01-01T00:00:00Z", "kubectl.kubernetes.io/restartedAt": "0001-01-01T00:00:00Z"}, "metadata", "annotations"); err != nil {
if err := setPgClusterAnnotations(un); err != nil {
return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)
}
if err := unstructured.SetNestedField(un.Object, nil, "status", "targetPrimaryTimestamp"); err != nil {
@@ -136,6 +136,22 @@ func setFluxRequestedAtAnnotation(un *unstructured.Unstructured) error {
return unstructured.SetNestedStringMap(un.Object, map[string]string{"reconcile.fluxcd.io/requestedAt": "By Argo CD at: 0001-01-01T00:00:00"}, "metadata", "annotations")
}
// setPgClusterAnnotations pins the CNPG reload/restart annotations on a
// postgresql.cnpg.io Cluster to fixed zero timestamps so diffs stay stable
// across test runs, while keeping any other annotations already present.
func setPgClusterAnnotations(un *unstructured.Unstructured) error {
	// Read the current annotations; a lookup error (wrong type) is deliberately
	// ignored — in that case we start from an empty map and overwrite.
	annotations, _, _ := unstructured.NestedStringMap(un.Object, "metadata", "annotations")
	if annotations == nil {
		annotations = map[string]string{}
	}
	// Overwrite only the volatile CNPG keys with deterministic values.
	for key, value := range map[string]string{
		"cnpg.io/reloadedAt":                "0001-01-01T00:00:00Z",
		"kubectl.kubernetes.io/restartedAt": "0001-01-01T00:00:00Z",
	} {
		annotations[key] = value
	}
	// Write the merged map back onto the object.
	return unstructured.SetNestedStringMap(un.Object, annotations, "metadata", "annotations")
}
func (t testNormalizer) normalizeJob(un *unstructured.Unstructured) error {
if conditions, exist, err := unstructured.NestedSlice(un.Object, "status", "conditions"); err != nil {
return fmt.Errorf("failed to normalize %s: %w", un.GetKind(), err)

View File

@@ -187,7 +187,7 @@ func NewClientApp(settings *settings.ArgoCDSettings, dexServerAddr string, dexTL
encryptionKey: encryptionKey,
clientCache: cacheClient,
azure: azureApp{mtx: &sync.RWMutex{}},
refreshTokenThreshold: settings.OIDCRefreshTokenThreshold,
refreshTokenThreshold: settings.RefreshTokenThreshold(),
}
log.Infof("Creating client app (%s)", a.clientID)
u, err := url.Parse(settings.URL)

View File

@@ -136,9 +136,6 @@ type ArgoCDSettings struct {
// token verification to pass despite the OIDC provider having an invalid certificate. Only set to `true` if you
// understand the risks.
OIDCTLSInsecureSkipVerify bool `json:"oidcTLSInsecureSkipVerify"`
// OIDCRefreshTokenThreshold sets the threshold for preemptive server-side token refresh. If set to 0, tokens
// will not be refreshed and will expire before client is redirected to login.
OIDCRefreshTokenThreshold time.Duration `json:"oidcRefreshTokenThreshold,omitempty"`
// AppsInAnyNamespaceEnabled indicates whether applications are allowed to be created in any namespace
AppsInAnyNamespaceEnabled bool `json:"appsInAnyNamespaceEnabled"`
// ExtensionConfig configurations related to ArgoCD proxy extensions. The keys are the extension name.
@@ -1464,7 +1461,6 @@ func getDownloadBinaryUrlsFromConfigMap(argoCDCM *corev1.ConfigMap) map[string]s
func updateSettingsFromConfigMap(settings *ArgoCDSettings, argoCDCM *corev1.ConfigMap) {
settings.DexConfig = argoCDCM.Data[settingDexConfigKey]
settings.OIDCConfigRAW = argoCDCM.Data[settingsOIDCConfigKey]
settings.OIDCRefreshTokenThreshold = settings.RefreshTokenThreshold()
settings.KustomizeBuildOptions = argoCDCM.Data[kustomizeBuildOptionsKey]
settings.StatusBadgeEnabled = argoCDCM.Data[statusBadgeEnabledKey] == "true"
settings.StatusBadgeRootUrl = argoCDCM.Data[statusBadgeRootURLKey]
@@ -1917,7 +1913,12 @@ func (a *ArgoCDSettings) UserInfoCacheExpiration() time.Duration {
// RefreshTokenThreshold returns the duration before token expiration that a token should be refreshed by the server
func (a *ArgoCDSettings) RefreshTokenThreshold() time.Duration {
if oidcConfig := a.OIDCConfig(); oidcConfig != nil && oidcConfig.RefreshTokenThreshold != "" {
return a.RefreshTokenThresholdWithConfig(a.OIDCConfig())
}
// RefreshTokenThresholdWithConfig takes oidcConfig as param and returns the duration before token expiration that a token should be refreshed by the server
func (a *ArgoCDSettings) RefreshTokenThresholdWithConfig(oidcConfig *OIDCConfig) time.Duration {
if oidcConfig != nil && oidcConfig.RefreshTokenThreshold != "" {
refreshTokenThreshold, err := time.ParseDuration(oidcConfig.RefreshTokenThreshold)
if err != nil {
log.Warnf("Failed to parse 'oidc.config.refreshTokenThreshold' key: %v", err)