Compare commits

...

24 Commits

Author SHA1 Message Date
github-actions[bot]
8d0dde1388 Bump version to 3.2.2 on release-3.2 branch (#25729)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2025-12-18 11:39:51 +02:00
argo-cd-cherry-pick-bot[bot]
784f62ca6d fix(server): update resourceVersion on Terminate retry (cherry-pick #25650 for 3.2) (#25718)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-12-18 01:15:13 +01:00
Marco Maurer (-Kilchhofer)
33b5043405 fix(oidc): check userinfo endpoint in AuthMiddleware (cherry-pick #23586 for 3.2) (#25415)
Signed-off-by: Nathanael Liechti <technat@technat.ch>
Co-authored-by: Nathanael Liechti <technat@technat.ch>
2025-12-17 18:48:23 -05:00
Regina Voloshin
88fe638aff chore(deps):bumped gitops-engine to v0.7.1-0.20251217140045-5baed5604d2d with bumped k8s.io/kubernetes to 1.34.2 (#25708)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
2025-12-17 11:23:22 -05:00
argo-cd-cherry-pick-bot[bot]
a29703877e test(controller): avoid race in test (cherry-pick #25655 for 3.2) (#25691)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-12-16 21:06:06 +02:00
Regina Voloshin
95e7cdb16f chore(deps): bumped k8s.io/kubernetes v1.34.0 to v1.34.2 - manual cherry-pick of 25682 for 3-2 (#25687)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
2025-12-16 18:07:01 +02:00
argo-cd-cherry-pick-bot[bot]
122f4db3db fix(hydrator): appset should preserve annotation when hydration is requested (cherry-pick #25644 for 3.2) (#25654)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
Co-authored-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
Co-authored-by: Regina Voloshin <regina.voloshin@codefresh.io>
2025-12-16 10:05:22 -05:00
argo-cd-cherry-pick-bot[bot]
2d65b26420 test: fix flaky impersonation test (cherry-pick #25641 for 3.2) (#25688)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-12-16 16:08:22 +02:00
Andreas Lindhé
0ace9bb9a3 docs: sync-waves guide: Use markdown formatting (cherry-pick #25372 for 3.2) (#25659)
Signed-off-by: Andreas Lindhé <7773090+lindhe@users.noreply.github.com>
Co-authored-by: Dov Murik <dov.murik@gmail.com>
2025-12-16 08:07:21 +02:00
argo-cd-cherry-pick-bot[bot]
6398ec3dcb chore: release champ 3.3 (cherry-pick #25202 for 3.2) (#25663)
Signed-off-by: Peter Jiang <35584807+pjiang-dev@users.noreply.github.com>
Co-authored-by: Peter Jiang <35584807+pjiang-dev@users.noreply.github.com>
2025-12-15 17:05:43 +02:00
argo-cd-cherry-pick-bot[bot]
732b16fb2a fix: create read and write secret for same url (cherry-pick #25581 for 3.2) (#25589)
Signed-off-by: emirot <emirot.nolan@gmail.com>
Co-authored-by: Nolan Emirot <emirot.nolan@gmail.com>
2025-12-10 11:17:58 +02:00
Ivan Pedersen
024c7e6020 chore: reference gitops-engine fork with nil pointer fix (#25522)
Signed-off-by: Ivan Pedersen <ivan.pedersen@volvocars.com>
2025-12-04 17:41:59 -05:00
argo-cd-cherry-pick-bot[bot]
26b7fb2c61 docs: add added healthchecks to upgrade docs (cherry-pick #25487 for 3.2) (#25490)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
2025-12-03 13:48:57 +01:00
github-actions[bot]
8c4ab63a9c Bump version to 3.2.1 on release-3.2 branch (#25449)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2025-11-30 13:37:38 +02:00
dudinea
29f869c82f fix: the concurrency issue with git detached processing in Repo Server (#25101) (cherry-pick #25127 for 3.2) (#25448)
Signed-off-by: Eugene Doudine <eugene.doudine@octopus.com>
2025-11-30 13:24:37 +02:00
argo-cd-cherry-pick-bot[bot]
c11e67d4bf docs: Document usage of ?. in notifications triggers and fix examples (#25352) (cherry-pick #25418 for 3.2) (#25421)
Signed-off-by: Eugene Doudine <eugene.doudine@octopus.com>
Co-authored-by: dudinea <eugene.doudine@octopus.com>
2025-11-26 09:41:59 +02:00
Regina Voloshin
a0a18438ab docs: Improve switch to annotation tracking docs, clarifying that a new Git commit may be needed to avoid orphan resources - (cherry-pick #25309 for 3.2) (#25338)
Signed-off-by: Regina Voloshin <regina.voloshin@codefresh.io>
2025-11-19 11:46:19 +01:00
Jaewoo Choi
dabdf39772 fix(ui): overlapping UI elements and add resource units to tooltips (cherry-pick #24717 for 3.2) (#25225)
Signed-off-by: choejwoo <jaewoo45@gmail.com>
2025-11-18 14:17:12 -08:00
argo-cd-cherry-pick-bot[bot]
cd8df1721c fix: Allow the ISVC to be healthy when the Stopped Condition is False (cherry-pick #25312 for 3.2) (#25318)
Signed-off-by: Hannah DeFazio <h2defazio@gmail.com>
Co-authored-by: Hannah DeFazio <h2defazio@gmail.com>
2025-11-17 23:20:41 -10:00
argo-cd-cherry-pick-bot[bot]
27c5065308 fix: revert #24197 (cherry-pick #25294 for 3.2) (#25314)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: Blake Pettersson <blake.pettersson@gmail.com>
2025-11-17 23:19:48 -10:00
Peter Jiang
1545390cd8 fix(cherry-pick): bump gitops-engine ssd regression (#25226)
Signed-off-by: Peter Jiang <peterjiang823@gmail.com>
2025-11-08 19:13:02 -05:00
argo-cd-cherry-pick-bot[bot]
7bd02d7f02 fix:(ui) don't render ApplicationSelector unless the panel is showing (cherry-pick #25201 for 3.2) (#25208)
Signed-off-by: Jonathan Winters <wintersjonathan0@gmail.com>
Co-authored-by: jwinters01 <34199886+jwinters01@users.noreply.github.com>
2025-11-06 17:55:27 -05:00
argo-cd-cherry-pick-bot[bot]
86c9994394 docs: update user content for deleting applications (cherry-pick #25124 for 3.2) (#25174)
Signed-off-by: Atif Ali <atali@redhat.com>
Co-authored-by: Atif Ali <atali@redhat.com>
Co-authored-by: Dan Garfield <dan@codefresh.io>
2025-11-05 01:15:04 -10:00
argo-cd-cherry-pick-bot[bot]
6dd5e7a6d2 fix(ui): add null-safe handling for assignedWindows in status panel (cherry-pick #25128 for 3.2) (#25180)
Signed-off-by: choejwoo <jaewoo45@gmail.com>
Co-authored-by: Jaewoo Choi <jaewoo45@gmail.com>
2025-11-05 01:12:47 -10:00
78 changed files with 1374 additions and 815 deletions

1
.gitignore vendored
View File

@@ -20,6 +20,7 @@ node_modules/
.kube/
./test/cmp/*.sock
.envrc.remote
.mirrord/
.*.swp
rerunreport.txt

View File

@@ -1 +1 @@
3.2.0
3.2.2

View File

@@ -75,6 +75,7 @@ const (
var defaultPreservedAnnotations = []string{
NotifiedAnnotationKey,
argov1alpha1.AnnotationKeyRefresh,
argov1alpha1.AnnotationKeyHydrate,
}
type deleteInOrder struct {

View File

@@ -589,6 +589,72 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
},
},
},
{
name: "Ensure that hydrate annotation is preserved from an existing app",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
Spec: v1alpha1.ApplicationSpec{
Project: "project",
},
},
},
},
existingApps: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "2",
Annotations: map[string]string{
"annot-key": "annot-value",
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
},
},
},
desiredApps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
},
},
},
expected: []v1alpha1.Application{
{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
Namespace: "namespace",
ResourceVersion: "3",
Annotations: map[string]string{
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "project",
},
},
},
},
{
name: "Ensure that configured preserved annotations are preserved from an existing app",
appSet: v1alpha1.ApplicationSet{

View File

@@ -80,6 +80,7 @@ func NewCommand() *cobra.Command {
includeHiddenDirectories bool
cmpUseManifestGeneratePaths bool
ociMediaTypes []string
enableBuiltinGitConfig bool
)
command := cobra.Command{
Use: cliName,
@@ -155,6 +156,7 @@ func NewCommand() *cobra.Command {
IncludeHiddenDirectories: includeHiddenDirectories,
CMPUseManifestGeneratePaths: cmpUseManifestGeneratePaths,
OCIMediaTypes: ociMediaTypes,
EnableBuiltinGitConfig: enableBuiltinGitConfig,
}, askPassServer)
errors.CheckError(err)
@@ -264,6 +266,7 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&includeHiddenDirectories, "include-hidden-directories", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_INCLUDE_HIDDEN_DIRECTORIES", false), "Include hidden directories from Git")
command.Flags().BoolVar(&cmpUseManifestGeneratePaths, "plugin-use-manifest-generate-paths", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_PLUGIN_USE_MANIFEST_GENERATE_PATHS", false), "Pass the resources described in argocd.argoproj.io/manifest-generate-paths value to the cmpserver to generate the application manifests.")
command.Flags().StringSliceVar(&ociMediaTypes, "oci-layer-media-types", env.StringsFromEnv("ARGOCD_REPO_SERVER_OCI_LAYER_MEDIA_TYPES", []string{"application/vnd.oci.image.layer.v1.tar", "application/vnd.oci.image.layer.v1.tar+gzip", "application/vnd.cncf.helm.chart.content.v1.tar+gzip"}, ","), "Comma separated list of allowed media types for OCI media types. This only accounts for media types within layers.")
command.Flags().BoolVar(&enableBuiltinGitConfig, "enable-builtin-git-config", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG", true), "Enable builtin git configuration options that are required for correct argocd-repo-server operation.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {

View File

@@ -2,7 +2,6 @@ package controller
import (
"context"
"encoding/json"
stderrors "errors"
"fmt"
"os"
@@ -263,7 +262,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alp
// resources which in this case applies the live values in the configured
// ignore differences fields.
if syncOp.SyncOptions.HasOption("RespectIgnoreDifferences=true") {
patchedTargets, err := normalizeTargetResources(openAPISchema, compareResult)
patchedTargets, err := normalizeTargetResources(compareResult)
if err != nil {
state.Phase = common.OperationError
state.Message = fmt.Sprintf("Failed to normalize target resources: %s", err)
@@ -435,65 +434,53 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alp
// - applies normalization to the target resources based on the live resources
// - copies ignored fields from the matching live resources: apply normalizer to the live resource,
// calculates the patch performed by normalizer and applies the patch to the target resource
func normalizeTargetResources(openAPISchema openapi.Resources, cr *comparisonResult) ([]*unstructured.Unstructured, error) {
// Normalize live and target resources (cleaning or aligning them)
func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructured, error) {
// normalize live and target resources
normalized, err := diff.Normalize(cr.reconciliationResult.Live, cr.reconciliationResult.Target, cr.diffConfig)
if err != nil {
return nil, err
}
patchedTargets := []*unstructured.Unstructured{}
for idx, live := range cr.reconciliationResult.Live {
normalizedTarget := normalized.Targets[idx]
if normalizedTarget == nil {
patchedTargets = append(patchedTargets, nil)
continue
}
gvk := normalizedTarget.GroupVersionKind()
originalTarget := cr.reconciliationResult.Target[idx]
if live == nil {
// No live resource, just use target
patchedTargets = append(patchedTargets, originalTarget)
continue
}
var (
lookupPatchMeta strategicpatch.LookupPatchMeta
versionedObject any
)
// Load patch meta struct or OpenAPI schema for CRDs
if versionedObject, err = scheme.Scheme.New(gvk); err == nil {
if lookupPatchMeta, err = strategicpatch.NewPatchMetaFromStruct(versionedObject); err != nil {
var lookupPatchMeta *strategicpatch.PatchMetaFromStruct
versionedObject, err := scheme.Scheme.New(normalizedTarget.GroupVersionKind())
if err == nil {
meta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
if err != nil {
return nil, err
}
} else if crdSchema := openAPISchema.LookupResource(gvk); crdSchema != nil {
lookupPatchMeta = strategicpatch.NewPatchMetaFromOpenAPI(crdSchema)
lookupPatchMeta = &meta
}
// Calculate live patch
livePatch, err := getMergePatch(normalized.Lives[idx], live, lookupPatchMeta)
if err != nil {
return nil, err
}
// Apply the patch to the normalized target
// This ensures ignored fields in live are restored into the target before syncing
normalizedTarget, err = applyMergePatch(normalizedTarget, livePatch, versionedObject, lookupPatchMeta)
normalizedTarget, err = applyMergePatch(normalizedTarget, livePatch, versionedObject)
if err != nil {
return nil, err
}
patchedTargets = append(patchedTargets, normalizedTarget)
}
return patchedTargets, nil
}
// getMergePatch calculates and returns the patch between the original and the
// modified unstructures.
func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMeta strategicpatch.LookupPatchMeta) ([]byte, error) {
func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMeta *strategicpatch.PatchMetaFromStruct) ([]byte, error) {
originalJSON, err := original.MarshalJSON()
if err != nil {
return nil, err
@@ -509,35 +496,18 @@ func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMet
return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
}
// applyMergePatch will apply the given patch in the obj and return the patched unstructure.
func applyMergePatch(obj *unstructured.Unstructured, patch []byte, versionedObject any, meta strategicpatch.LookupPatchMeta) (*unstructured.Unstructured, error) {
// applyMergePatch will apply the given patch in the obj and return the patched
// unstructure.
func applyMergePatch(obj *unstructured.Unstructured, patch []byte, versionedObject any) (*unstructured.Unstructured, error) {
originalJSON, err := obj.MarshalJSON()
if err != nil {
return nil, err
}
var patchedJSON []byte
switch {
case versionedObject != nil:
patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patch, versionedObject)
case meta != nil:
var originalMap, patchMap map[string]any
if err := json.Unmarshal(originalJSON, &originalMap); err != nil {
return nil, err
}
if err := json.Unmarshal(patch, &patchMap); err != nil {
return nil, err
}
patchedMap, err := strategicpatch.StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, meta)
if err != nil {
return nil, err
}
patchedJSON, err = json.Marshal(patchedMap)
if err != nil {
return nil, err
}
default:
if versionedObject == nil {
patchedJSON, err = jsonpatch.MergePatch(originalJSON, patch)
} else {
patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patch, versionedObject)
}
if err != nil {
return nil, err

View File

@@ -1,17 +1,9 @@
package controller
import (
"fmt"
"os"
"strconv"
"testing"
openapi_v2 "github.com/google/gnostic-models/openapiv2"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubectl/pkg/util/openapi"
"sigs.k8s.io/yaml"
"github.com/argoproj/gitops-engine/pkg/sync"
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -31,29 +23,6 @@ import (
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
)
type fakeDiscovery struct {
schema *openapi_v2.Document
}
func (f *fakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) {
return f.schema, nil
}
func loadCRDSchema(t *testing.T, path string) *openapi_v2.Document {
t.Helper()
data, err := os.ReadFile(path)
require.NoError(t, err)
jsonData, err := yaml.YAMLToJSON(data)
require.NoError(t, err)
doc, err := openapi_v2.ParseDocument(jsonData)
require.NoError(t, err)
return doc
}
func TestPersistRevisionHistory(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = nil
@@ -416,7 +385,7 @@ func TestNormalizeTargetResources(t *testing.T) {
f := setup(t, ignores)
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -429,7 +398,7 @@ func TestNormalizeTargetResources(t *testing.T) {
f := setup(t, []v1alpha1.ResourceIgnoreDifferences{})
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -449,7 +418,7 @@ func TestNormalizeTargetResources(t *testing.T) {
unstructured.RemoveNestedField(live.Object, "metadata", "annotations", "iksm-version")
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -474,7 +443,7 @@ func TestNormalizeTargetResources(t *testing.T) {
f := setup(t, ignores)
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -489,6 +458,7 @@ func TestNormalizeTargetResources(t *testing.T) {
assert.Equal(t, int64(4), replicas)
})
t.Run("will keep new array entries not found in live state if not ignored", func(t *testing.T) {
t.Skip("limitation in the current implementation")
// given
ignores := []v1alpha1.ResourceIgnoreDifferences{
{
@@ -502,7 +472,7 @@ func TestNormalizeTargetResources(t *testing.T) {
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -539,11 +509,6 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
}
t.Run("will properly ignore nested fields within arrays", func(t *testing.T) {
doc := loadCRDSchema(t, "testdata/schemas/httpproxy_openapi_v2.yaml")
disco := &fakeDiscovery{schema: doc}
oapiGetter := openapi.NewOpenAPIGetter(disco)
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
require.NoError(t, err)
// given
ignores := []v1alpha1.ResourceIgnoreDifferences{
{
@@ -557,11 +522,8 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
gvk := schema.GroupVersionKind{Group: "projectcontour.io", Version: "v1", Kind: "HTTPProxy"}
fmt.Printf("LookupResource result: %+v\n", oapiResources.LookupResource(gvk))
// when
patchedTargets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
patchedTargets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -600,7 +562,7 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -652,7 +614,7 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
// when
targets, err := normalizeTargetResources(nil, f.comparisonResult)
targets, err := normalizeTargetResources(f.comparisonResult)
// then
require.NoError(t, err)
@@ -706,175 +668,6 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
assert.Equal(t, "EV", env0["name"])
assert.Equal(t, "here", env0["value"])
})
t.Run("patches ignored differences in individual array elements of HTTPProxy CRD", func(t *testing.T) {
doc := loadCRDSchema(t, "testdata/schemas/httpproxy_openapi_v2.yaml")
disco := &fakeDiscovery{schema: doc}
oapiGetter := openapi.NewOpenAPIGetter(disco)
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
require.NoError(t, err)
ignores := []v1alpha1.ResourceIgnoreDifferences{
{
Group: "projectcontour.io",
Kind: "HTTPProxy",
JQPathExpressions: []string{".spec.routes[].rateLimitPolicy.global.descriptors[].entries[]"},
},
}
f := setupHTTPProxy(t, ignores)
target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
live := test.YamlToUnstructured(testdata.LiveHTTPProxy)
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
patchedTargets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
require.NoError(t, err)
require.Len(t, patchedTargets, 1)
patched := patchedTargets[0]
// verify descriptors array in patched target
descriptors := dig(patched.Object, "spec", "routes", 0, "rateLimitPolicy", "global", "descriptors").([]any)
require.Len(t, descriptors, 1) // Only the descriptors with ignored entries should remain
// verify individual entries array inside the descriptor
entriesArr := dig(patched.Object, "spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries").([]any)
require.Len(t, entriesArr, 1) // Only the ignored entry should be patched
// verify the content of the entry is preserved correctly
entry := entriesArr[0].(map[string]any)
requestHeader := entry["requestHeader"].(map[string]any)
assert.Equal(t, "sample-header", requestHeader["headerName"])
assert.Equal(t, "sample-key", requestHeader["descriptorKey"])
})
}
func TestNormalizeTargetResourcesCRDs(t *testing.T) {
type fixture struct {
comparisonResult *comparisonResult
}
setupHTTPProxy := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture {
t.Helper()
dc, err := diff.NewDiffConfigBuilder().
WithDiffSettings(ignores, nil, true, normalizers.IgnoreNormalizerOpts{}).
WithNoCache().
Build()
require.NoError(t, err)
live := test.YamlToUnstructured(testdata.SimpleAppLiveYaml)
target := test.YamlToUnstructured(testdata.SimpleAppTargetYaml)
return &fixture{
&comparisonResult{
reconciliationResult: sync.ReconciliationResult{
Live: []*unstructured.Unstructured{live},
Target: []*unstructured.Unstructured{target},
},
diffConfig: dc,
},
}
}
t.Run("sample-app", func(t *testing.T) {
doc := loadCRDSchema(t, "testdata/schemas/simple-app.yaml")
disco := &fakeDiscovery{schema: doc}
oapiGetter := openapi.NewOpenAPIGetter(disco)
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
require.NoError(t, err)
ignores := []v1alpha1.ResourceIgnoreDifferences{
{
Group: "example.com",
Kind: "SimpleApp",
JQPathExpressions: []string{".spec.servers[1].enabled", ".spec.servers[0].port"},
},
}
f := setupHTTPProxy(t, ignores)
target := test.YamlToUnstructured(testdata.SimpleAppTargetYaml)
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
live := test.YamlToUnstructured(testdata.SimpleAppLiveYaml)
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
patchedTargets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
require.NoError(t, err)
require.Len(t, patchedTargets, 1)
patched := patchedTargets[0]
require.NotNil(t, patched)
// 'spec.servers' array has length 2
servers := dig(patched.Object, "spec", "servers").([]any)
require.Len(t, servers, 2)
// first server's 'name' is 'server1'
name1 := dig(patched.Object, "spec", "servers", 0, "name").(string)
assert.Equal(t, "server1", name1)
assert.Equal(t, int64(8081), dig(patched.Object, "spec", "servers", 0, "port").(int64))
assert.Equal(t, int64(9090), dig(patched.Object, "spec", "servers", 1, "port").(int64))
// first server's 'enabled' should be true
enabled1 := dig(patched.Object, "spec", "servers", 0, "enabled").(bool)
assert.True(t, enabled1)
// second server's 'name' should be 'server2'
name2 := dig(patched.Object, "spec", "servers", 1, "name").(string)
assert.Equal(t, "server2", name2)
// second server's 'enabled' should be true (respected from live due to ignoreDifferences)
enabled2 := dig(patched.Object, "spec", "servers", 1, "enabled").(bool)
assert.True(t, enabled2)
})
t.Run("rollout-obj", func(t *testing.T) {
// Load Rollout CRD schema like SimpleApp
doc := loadCRDSchema(t, "testdata/schemas/rollout-schema.yaml")
disco := &fakeDiscovery{schema: doc}
oapiGetter := openapi.NewOpenAPIGetter(disco)
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
require.NoError(t, err)
ignores := []v1alpha1.ResourceIgnoreDifferences{
{
Group: "argoproj.io",
Kind: "Rollout",
JQPathExpressions: []string{`.spec.template.spec.containers[] | select(.name == "init") | .image`},
},
}
f := setupHTTPProxy(t, ignores)
live := test.YamlToUnstructured(testdata.LiveRolloutYaml)
target := test.YamlToUnstructured(testdata.TargetRolloutYaml)
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
targets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
require.NoError(t, err)
require.Len(t, targets, 1)
patched := targets[0]
require.NotNil(t, patched)
containers := dig(patched.Object, "spec", "template", "spec", "containers").([]any)
require.Len(t, containers, 2)
initContainer := containers[0].(map[string]any)
mainContainer := containers[1].(map[string]any)
// Assert init container image is preserved (ignoreDifferences works)
initImage := dig(initContainer, "image").(string)
assert.Equal(t, "init-container:v1", initImage)
// Assert main container fields as expected
mainName := dig(mainContainer, "name").(string)
assert.Equal(t, "main", mainName)
mainImage := dig(mainContainer, "image").(string)
assert.Equal(t, "main-container:v1", mainImage)
})
}
func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {

View File

@@ -32,16 +32,4 @@ var (
//go:embed additional-image-replicas-deployment.yaml
AdditionalImageReplicaDeploymentYaml string
//go:embed simple-app-live.yaml
SimpleAppLiveYaml string
//go:embed simple-app-target.yaml
SimpleAppTargetYaml string
//go:embed target-rollout.yaml
TargetRolloutYaml string
//go:embed live-rollout.yaml
LiveRolloutYaml string
)

View File

@@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
name: rollout-sample
spec:
replicas: 2
strategy:
canary:
steps:
- setWeight: 20
selector:
matchLabels:
app: rollout-sample
template:
metadata:
labels:
app: rollout-sample
spec:
containers:
- name: init
image: init-container:v1
livenessProbe:
initialDelaySeconds: 10
- name: main
image: main-container:v1

View File

@@ -1,62 +0,0 @@
swagger: "2.0"
info:
title: HTTPProxy
version: "v1"
paths: {}
definitions:
io.projectcontour.v1.HTTPProxy:
type: object
x-kubernetes-group-version-kind:
- group: projectcontour.io
version: v1
kind: HTTPProxy
properties:
spec:
type: object
properties:
routes:
type: array
items:
type: object
properties:
rateLimitPolicy:
type: object
properties:
global:
type: object
properties:
descriptors:
type: array
x-kubernetes-list-map-keys:
- entries
items:
type: object
properties:
entries:
type: array
x-kubernetes-list-map-keys:
- headerName
items:
type: object
properties:
requestHeader:
type: object
properties:
descriptorKey:
type: string
headerName:
type: string
requestHeaderValueMatch:
type: object
properties:
headers:
type: array
items:
type: object
properties:
name:
type: string
contains:
type: string
value:
type: string

View File

@@ -1,67 +0,0 @@
swagger: "2.0"
info:
title: Rollout
version: "v1alpha1"
paths: {}
definitions:
argoproj.io.v1alpha1.Rollout:
type: object
x-kubernetes-group-version-kind:
- group: argoproj.io
version: v1alpha1
kind: Rollout
properties:
spec:
type: object
properties:
replicas:
type: integer
strategy:
type: object
properties:
canary:
type: object
properties:
steps:
type: array
items:
type: object
properties:
setWeight:
type: integer
selector:
type: object
properties:
matchLabels:
type: object
additionalProperties:
type: string
template:
type: object
properties:
metadata:
type: object
properties:
labels:
type: object
additionalProperties:
type: string
spec:
type: object
properties:
containers:
type: array
x-kubernetes-list-map-keys:
- name
items:
type: object
properties:
name:
type: string
image:
type: string
livenessProbe:
type: object
properties:
initialDelaySeconds:
type: integer

View File

@@ -1,29 +0,0 @@
swagger: "2.0"
info:
title: SimpleApp
version: "v1"
paths: {}
definitions:
example.com.v1.SimpleApp:
type: object
x-kubernetes-group-version-kind:
- group: example.com
version: v1
kind: SimpleApp
properties:
spec:
type: object
properties:
servers:
type: array
x-kubernetes-list-map-keys:
- name
items:
type: object
properties:
name:
type: string
port:
type: integer
enabled:
type: boolean

View File

@@ -1,12 +0,0 @@
apiVersion: example.com/v1
kind: SimpleApp
metadata:
name: simpleapp-sample
spec:
servers:
- name: server1
port: 8081 # port changed in live from 8080
enabled: true
- name: server2
port: 9090
enabled: true # enabled changed in live from false

View File

@@ -1,12 +0,0 @@
apiVersion: example.com/v1
kind: SimpleApp
metadata:
name: simpleapp-sample
spec:
servers:
- name: server1
port: 8080
enabled: true
- name: server2
port: 9090
enabled: false

View File

@@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
name: rollout-sample
spec:
replicas: 2
strategy:
canary:
steps:
- setWeight: 20
selector:
matchLabels:
app: rollout-sample
template:
metadata:
labels:
app: rollout-sample
spec:
containers:
- name: init
image: init-container:v1
livenessProbe:
initialDelaySeconds: 15
- name: main
image: main-container:v1

View File

@@ -18,9 +18,11 @@ These are the upcoming releases dates:
| v2.13 | Monday, Sep. 16, 2024 | Monday, Nov. 4, 2024 | [Regina Voloshin](https://github.com/reggie-k) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/19513) |
| v2.14 | Monday, Dec. 16, 2024 | Monday, Feb. 3, 2025 | [Ryan Umstead](https://github.com/rumstead) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/20869) |
| v3.0 | Monday, Mar. 17, 2025 | Tuesday, May 6, 2025 | [Regina Voloshin](https://github.com/reggie-k) | | [checklist](https://github.com/argoproj/argo-cd/issues/21735) |
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](#) |
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | | [checklist](#) |
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | | |
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](https://github.com/argoproj/argo-cd/issues/23347) |
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | [Michael Crenshaw](https://github.com/crenshaw-dev) | [checklist](https://github.com/argoproj/argo-cd/issues/24539) |
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | [Peter Jiang](https://github.com/pjiang-dev) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/25211) |
| v3.4 | Monday, Mar. 16, 2026 | Monday, May. 4, 2026 | | |
| v3.5 | Monday, Jun. 15, 2026 | Monday, Aug. 3, 2026 | | |
Actual release dates might differ from the plan by a few days.

View File

@@ -219,6 +219,8 @@ data:
reposerver.git.lsremote.parallelism.limit: "0"
# Git requests timeout.
reposerver.git.request.timeout: "15s"
# Enable builtin git configuration options that are required for correct argocd-repo-server operation (default "true")
reposerver.enable.builtin.git.config: "true"
# Include hidden directories from Git
reposerver.include.hidden.directories: "false"

View File

@@ -121,6 +121,18 @@ spec:
...
```
### Deleting child applications
When working with the App of Apps pattern, you may need to delete individual child applications. Starting in 3.2, Argo CD provides consistent deletion behaviour whether you delete from the Applications List or from the parent application's Resource Tree.
For detailed information about deletion options and behaviour, including:
- Consistent deletion across UI views
- Non-cascading (orphan) deletion to preserve managed resources
- Child application detection and improved dialog messages
- Best practices and example scenarios
See [Deleting Applications in the UI](../user-guide/app_deletion.md#deleting-applications-in-the-ui).
### Ignoring differences in child applications
To allow changes in child apps without triggering an out-of-sync status, or modification for debugging etc, the app of apps pattern works with [diff customization](../user-guide/diffing/). The example below shows how to ignore changes to syncPolicy and other common values.

View File

@@ -0,0 +1,43 @@
# Git Configuration
## System Configuration
Argo CD uses the Git installation from its base image (Ubuntu), which
includes a standard system configuration file located at
`/etc/gitconfig`. This file is minimal, just defining filters
necessary for Git LFS functionality.
You can customize Git's system configuration by mounting a file from a
ConfigMap or by creating a custom Argo CD image.
## Global Configuration
Argo CD runs Git with the `HOME` environment variable set to
`/dev/null`. As a result, global Git configuration is not supported.
## Built-in Configuration
The `argocd-repo-server` adds specific configuration parameters to the
Git environment to ensure proper Argo CD operation. These built-in
settings override any conflicting values from the system Git
configuration.
Currently, the following built-in configuration options are set:
- `maintenance.autoDetach=false`
- `gc.autoDetach=false`
These settings force Git's repository maintenance tasks to run in the
foreground. This prevents Git from running detached background
processes that could modify the repository and interfere with
subsequent Git invocations from `argocd-repo-server`.
You can disable these built-in settings by setting the
`argocd-cmd-params-cm` value `reposerver.enable.builtin.git.config` to
`"false"`. This allows you to experiment with background processing, or
to disable these settings if you are certain that concurrency issues
will not occur in your environment.
> [!NOTE]
> Disabling this is not recommended and is not supported!

View File

@@ -35,14 +35,26 @@ metadata:
name: argocd-notifications-cm
data:
trigger.sync-operation-change: |
- when: app.status.operationState.phase in ['Succeeded']
- when: app.status?.operationState.phase in ['Succeeded']
send: [github-commit-status]
- when: app.status.operationState.phase in ['Running']
- when: app.status?.operationState.phase in ['Running']
send: [github-commit-status]
- when: app.status.operationState.phase in ['Error', 'Failed']
- when: app.status?.operationState.phase in ['Error', 'Failed']
send: [app-sync-failed, github-commit-status]
```
## Accessing Optional Manifest Sections and Fields
Note that in the trigger example above, the `?.` (optional chaining) operator is used to access the Application's
`status.operationState` section. This section is optional; it is not present when an operation has been initiated but has not yet
been started by the Application Controller.
If the `?.` operator were not used, `status.operationState` would resolve to `nil` and the evaluation of the
`app.status.operationState.phase` expression would fail. The `app.status?.operationState.phase` expression is equivalent to
`app.status.operationState != nil ? app.status.operationState.phase : nil`.
## Avoid Sending Same Notification Too Often
In some cases, the trigger condition might be "flapping". The example below illustrates the problem.
@@ -60,14 +72,14 @@ data:
# Optional 'oncePer' property ensure that notification is sent only once per specified field value
# E.g. following is triggered once per sync revision
trigger.on-deployed: |
when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'
when: app.status?.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'
oncePer: app.status.sync.revision
send: [app-sync-succeeded]
```
**Mono Repo Usage**
When one repo is used to sync multiple applications, the `oncePer: app.status.sync.revision` field will trigger a notification for each commit. For mono repos, the better approach will be using `oncePer: app.status.operationState.syncResult.revision` statement. This way a notification will be sent only for a particular Application's revision.
When one repo is used to sync multiple applications, the `oncePer: app.status.sync.revision` field will trigger a notification for each commit. For mono repos, the better approach will be using `oncePer: app.status?.operationState.syncResult.revision` statement. This way a notification will be sent only for a particular Application's revision.
### oncePer
@@ -122,7 +134,7 @@ Triggers have access to the set of built-in functions.
Example:
```yaml
when: time.Now().Sub(time.Parse(app.status.operationState.startedAt)).Minutes() >= 5
when: time.Now().Sub(time.Parse(app.status?.operationState.startedAt)).Minutes() >= 5
```
{!docs/operator-manual/notifications/functions.md!}

View File

@@ -21,6 +21,7 @@ argocd-repo-server [flags]
--disable-helm-manifest-max-extracted-size Disable maximum size of helm manifest archives when extracted
--disable-oci-manifest-max-extracted-size Disable maximum size of oci manifest archives when extracted
--disable-tls Disable TLS on the gRPC endpoint
--enable-builtin-git-config Enable builtin git configuration options that are required for correct argocd-repo-server operation. (default true)
--helm-manifest-max-extracted-size string Maximum size of helm manifest archives when extracted (default "1G")
--helm-registry-max-index-size string Maximum size of registry index file (default "1G")
-h, --help help for argocd-repo-server

View File

@@ -55,4 +55,16 @@ spec:
+ protocol: UDP
+ - port: 53
+ protocol: TCP
```
```
## Added Healthchecks
* [route53.aws.crossplane.io/ResourceRecordSet](https://github.com/argoproj/argo-cd/commit/666499f6108124ef7bfa0c6cc616770c6dc4f42c)
* [cloudfront.aws.crossplane.io/Distribution](https://github.com/argoproj/argo-cd/commit/21c384f42354ada2b94c18773104527eb27f86bc)
* [beat.k8s.elastic.co/Beat](https://github.com/argoproj/argo-cd/commit/5100726fd61617a0001a27233cfe8ac4354bdbed)
* [apps.kruise.io/AdvancedCronjob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
* [apps.kruise.io/BroadcastJob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
* [apps.kruise.io/CloneSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
* [apps.kruise.io/DaemonSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
* [apps.kruise.io/StatefulSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
* [rollouts.kruise.io/Rollout](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)

View File

@@ -57,3 +57,24 @@ The affected ApplicationSet fields are the following (jq selector syntax):
* `.spec.generators[].clusterDecisionResource.labelSelector`
* `.spec.generators[].matrix.generators[].selector`
* `.spec.generators[].merge.generators[].selector`
## Added Healthchecks
* [core.humio.com/HumioAction](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [core.humio.com/HumioAlert](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [core.humio.com/HumioCluster](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [core.humio.com/HumioIngestToken](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [core.humio.com/HumioParser](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [core.humio.com/HumioRepository](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [core.humio.com/HumioView](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
* [k8s.mariadb.com/Backup](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
* [k8s.mariadb.com/Database](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
* [k8s.mariadb.com/Grant](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
* [k8s.mariadb.com/MariaDB](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
* [k8s.mariadb.com/SqlJob](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
* [k8s.mariadb.com/User](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
* [kafka.strimzi.io/KafkaBridge](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
* [kafka.strimzi.io/KafkaConnector](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
* [keda.sh/ScaledObject](https://github.com/argoproj/argo-cd/commit/9bc9ff9c7a3573742a767c38679cbefb4f07c1c0)
* [openfaas.com/Function](https://github.com/argoproj/argo-cd/commit/2a05ae02ab90ae06fefa97ed6b9310590d317783)
* [camel.apache.org/Integration](https://github.com/argoproj/argo-cd/commit/1e2f5987d25307581cd56b8fe9d329633e0f704f)

View File

@@ -68,6 +68,41 @@ The default extension for log files generated by Argo CD when using the "Downloa
- Consistency with standard log file conventions.
If you have any custom scripts or tools that depend on the `.txt` extension, please update them accordingly.
## Added proxy to kustomize
Proxy config set on repository credentials / repository templates is now passed down to the `kustomize build` command.
## Added Healthchecks
* [controlplane.cluster.x-k8s.io/AWSManagedControlPlane](https://github.com/argoproj/argo-cd/commit/f1105705126153674c79f69b5d9c9647360d16f5)
* [policy.open-cluster-management.io/CertificatePolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
* [policy.open-cluster-management.io/ConfigurationPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
* [policy.open-cluster-management.io/OperatorPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
* [policy.open-cluster-management.io/Policy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
* [PodDisruptionBudget](https://github.com/argoproj/argo-cd/commit/e86258d8a5049260b841abc0ef1fd7f7a4b7cd45)
* [cluster.x-k8s.io/MachinePool](https://github.com/argoproj/argo-cd/commit/59e00911304288b4f96889bf669b6ed2aecdf31b)
* [lifecycle.keptn.sh/KeptnWorkloadVersion](https://github.com/argoproj/argo-cd/commit/ddc0b0fd3fa7e0b53170582846b20be23c301185)
* [numaplane.numaproj.io/ISBServiceRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
* [numaplane.numaproj.io/NumaflowControllerRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
* [numaplane.numaproj.io/PipelineRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
* [rds.aws.crossplane.io/DBCluster](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
* [rds.aws.crossplane.io/DBInstance](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
* [iam.aws.crossplane.io/Policy](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
* [iam.aws.crossplane.io/RolePolicyAttachment](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
* [iam.aws.crossplane.io/Role](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
* [s3.aws.crossplane.io/Bucket](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
* [metrics.keptn.sh/KeptnMetric](https://github.com/argoproj/argo-cd/commit/326cc4a06b2cb5ac99797d3f04c2d4c48b8692e2)
* [metrics.keptn.sh/Analysis](https://github.com/argoproj/argo-cd/commit/e26c105e527ed262cc5dc838a793841017ba316a)
* [numaplane.numaproj.io/MonoVertexRollout](https://github.com/argoproj/argo-cd/commit/32ee00f1f494f69cc84d1881dda70ce514e1f737)
* [helm.toolkit.fluxcd.io/HelmRelease](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [image.toolkit.fluxcd.io/ImagePolicy](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [image.toolkit.fluxcd.io/ImageRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [image.toolkit.fluxcd.io/ImageUpdateAutomation](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [kustomize.toolkit.fluxcd.io/Kustomization](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [notification.toolkit.fluxcd.io/Receiver](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [source.toolkit.fluxcd.io/Bucket](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [source.toolkit.fluxcd.io/GitRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [source.toolkit.fluxcd.io/HelmChart](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [source.toolkit.fluxcd.io/HelmRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
* [source.toolkit.fluxcd.io/OCIRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)

View File

@@ -20,3 +20,27 @@ the [CLI and Application CR](https://argo-cd.readthedocs.io/en/latest/user-guide
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
the project API response was sanitized to remove sensitive information. This includes
credentials of project-scoped repositories and clusters.
## Added Healthchecks
* [platform.confluent.io/Connector](https://github.com/argoproj/argo-cd/commit/99efafb55a553a9ab962d56c20dab54ba65b7ae0)
* [addons.cluster.x-k8s.io/ClusterResourceSet](https://github.com/argoproj/argo-cd/commit/fdf539dc6a027ef975fde23bf734f880570ccdc3)
* [numaflow.numaproj.io/InterStepBufferService](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
* [numaflow.numaproj.io/MonoVertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
* [numaflow.numaproj.io/Pipeline](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
* [numaflow.numaproj.io/Vertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
* [acid.zalan.do/Postgresql](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
* [grafana.integreatly.org/Grafana](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
* [grafana.integreatly.org/GrafanaDatasource](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
* [k8s.keycloak.org/Keycloak](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
* [solr.apache.org/SolrCloud](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
* [gateway.solo.io/Gateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gateway.solo.io/MatchableHttpGateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gateway.solo.io/RouteOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gateway.solo.io/RouteTable](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gateway.solo.io/VirtualHostOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gateway.solo.io/VirtualService](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gloo.solo.io/Proxy](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gloo.solo.io/Settings](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gloo.solo.io/Upstream](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
* [gloo.solo.io/UpstreamGroup](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)

View File

@@ -288,6 +288,9 @@ resources.
delete it. To avoid this edge case, it is recommended to perform a sync operation on your Applications, even if
they are not out of sync, so that orphan resource detection will work as expected on the next sync.
After upgrading to version 3.0, the Argo CD tracking annotation will only appear on an Application's resources when
either a new Git commit is made or the Application is explicitly synced.
##### Users who rely on label-based for resources that are not managed by Argo CD
Some users rely on label-based tracking to track resources that are not managed by Argo CD. They may set annotations
to have Argo CD ignore the resource as extraneous or to disable pruning. If you are using label-based tracking to track
@@ -497,4 +500,8 @@ More details for ignored resource updates in the [Diffing customization](../../u
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
the project API response was sanitized to remove sensitive information. This includes
credentials of project-scoped repositories and clusters.
credentials of project-scoped repositories and clusters.
## Added Healthchecks
* No new added health checks

View File

@@ -63,3 +63,25 @@ to the [release notes](https://github.com/kubernetes-sigs/kustomize/releases/tag
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
the project API response was sanitized to remove sensitive information. This includes
credentials of project-scoped repositories and clusters.
## Added Healthchecks
* [core.spinkube.dev/SpinApp](https://github.com/argoproj/argo-cd/commit/7d6604404fd3b7d77124f9623a2d7a12cc24a0bb)
* [opentelemetry.io/OpenTelemetryCollector](https://github.com/argoproj/argo-cd/commit/65464d8b77941c65499028bb14172fc40e62e38b)
* [logstash.k8s.elastic.co/Logstash](https://github.com/argoproj/argo-cd/commit/8f1f5c7234e694a4830744f92e1b0f8d1e3cd43d)
* [kyverno.io/Policy](https://github.com/argoproj/argo-cd/commit/e578b85410f748c6c7b4e10ff1a5fdbca09b3328)
* [projectcontour.io/HTTPProxy](https://github.com/argoproj/argo-cd/commit/ce4b7a28cc77959fab5b6fedd14b1f9e9a4af4f7)
* [grafana.integreatly.org/GrafanaDashboard](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
* [grafana.integreatly.org/GrafanaFolder](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
* [postgresql.cnpg.io/Cluster](https://github.com/argoproj/argo-cd/commit/f4edcf7717940e44a141dadb5ca8c5fc11951cb2)
* [gateway.networking.k8s.io/GRPCRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
* [gateway.networking.k8s.io/Gateway](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
* [gateway.networking.k8s.io/HTTPRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
* [rabbitmq.com/Binding](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/Exchange](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/Permission](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/Policy](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/Queue](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/Shovel](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/User](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
* [rabbitmq.com/Vhost](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)

View File

@@ -92,3 +92,30 @@ credentials of project-scoped repositories and clusters.
The `resources` field of the `status` resource of an ApplicationSet is now limited to 5000 elements by default. This is
to prevent status bloat and exceeding etcd limits. The limit can be configured by setting the `applicationsetcontroller.status.max.resources.count`
field in the `argocd-cmd-params-cm` ConfigMap.
## Added Healthchecks
* [datadoghq.com/DatadogMetric](https://github.com/argoproj/argo-cd/commit/5c9a5ef9a65f8e04e729fbae54a9310c0a42f6c2)
* [CronJob](https://github.com/argoproj/argo-cd/commit/d3de4435ce86f3f85a4cc58978b2544af2ac4248)
* [promoter.argoproj.io/ArgoCDCommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
* [promoter.argoproj.io/ChangeTransferPolicy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
* [promoter.argoproj.io/CommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
* [promoter.argoproj.io/PromotionStrategy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
* [promoter.argoproj.io/PullRequest](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
* [coralogix.com/Alert](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
* [coralogix.com/RecordingRuleGroupSet](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
* [projectcontour.io/ExtensionService](https://github.com/argoproj/argo-cd/commit/4e63bc756394d93c684b6b8e8b3856e0e6b3f199)
* [clickhouse-keeper.altinity.com/ClickHouseKeeperInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
* [clickhouse.altinity.com/ClickHouseInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
* [apps.3scale.net/APIManager](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/ActiveDoc](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/ApplicationAuth](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/Application](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/Backend](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/CustomPolicyDefinition](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/DeveloperAccount](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/DeveloperUser](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/OpenAPI](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/Product](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/ProxyConfigPromote](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
* [capabilities.3scale.net/Tenant](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)

View File

@@ -58,3 +58,120 @@ Argo CD performs [background cascading deletion](https://kubernetes.io/docs/conc
When you invoke `argocd app delete` with `--cascade`, the finalizer is added automatically.
You can set the propagation policy with `--propagation-policy <foreground|background>`.
## Deleting Applications in the UI
Argo CD provides a consistent deletion experience across different views in the UI. When deleting applications, you can access the delete functionality from:
- **Applications List View**: The main applications page showing all applications
- **Application Details View - Resource Tree**: When viewing an application's resource tree that contains child applications
### Consistent Deletion Behaviour
Starting in Argo CD 3.2, deletion behaviour is now **consistent** across all UI views. Whether you delete an application from the Applications List or from the Resource Tree view, the same deletion mechanism and options are used.
Previously, deleting an application from the Resource Tree treated it as a generic Kubernetes resource, which could lead to unexpected behaviour with non-cascading deletes. Now, Argo CD properly detects Application resources and uses the standard Application deletion API in all contexts.
### Deleting Child Applications in App of Apps Pattern
When using the [App of Apps pattern](../operator-manual/cluster-bootstrapping.md), parent applications can contain child applications as resources. Argo CD automatically detects child applications and provides improved dialog messages to help you understand what you're deleting.
#### Child Application Detection
Argo CD identifies a child application by checking for the `app.kubernetes.io/part-of` label. If this label is present and has a non-empty value, the application is considered a child application.
#### Delete Dialog Differences
**When deleting a child application:**
- Dialog title: "Delete child application"
- Confirmation prompt references "child application" to make it clear you're deleting a managed application
- Additional warning note appears when deleting from the Resource Tree
**When deleting a regular application:**
- Dialog title: "Delete application"
- Standard confirmation prompt
**When deleting from the Resource Tree:**
An additional informational note appears:
> ⚠️ **Note:** You are about to delete an Application from the resource tree. This uses the same deletion behaviour as the Applications list page.
This note clarifies that the deletion will use the proper Application deletion API, not generic Kubernetes resource deletion.
### Deletion Options (Propagation Policies)
When deleting an application through the UI, you can choose from three propagation policies:
#### 1. Foreground (Default)
- Deletes the application and all its managed resources
- Waits for all managed resources to be deleted before the Application is removed
- **Use case**: When you want to ensure all resources are cleaned up before the Application disappears
#### 2. Background
- Deletes the application and all its managed resources
- The Application is removed immediately, and resources are deleted in the background
- **Use case**: When you want faster Application deletion and don't need to wait for resource cleanup
#### 3. Non-Cascading (Orphan)
- Deletes **only** the Application resource
- All managed resources (Deployments, Services, ConfigMaps, etc.) are **preserved** in the cluster
- The finalizer is removed automatically before deletion
- **Use case**: When you want to stop managing resources through Argo CD but keep them running
> [!WARNING]
> **Important for Non-Cascading Deletes**
>
> When you select **Non-Cascading**, Argo CD will:
> - Remove the `resources-finalizer.argocd.argoproj.io` finalizer from the Application
> - Delete only the Application resource
> - Leave all managed resources (Pods, Services, Deployments, etc.) running in the cluster
>
> This behaviour is now **consistent** whether you delete from the Applications List or from the Resource Tree view.
### Best Practices for App of Apps Pattern
When working with the App of Apps pattern:
1. **Understand the impact**: Deleting a child application with Foreground or Background propagation will delete all of its managed resources
2. **Review before deleting**: Always verify what resources are managed by the application before performing a cascading delete
3. **Use Non-Cascading cautiously**: If you only want to remove the Application resource but keep the deployed workloads, use Non-Cascading delete
4. **Consider finalizers**: Ensure child applications have appropriate finalizers set based on your deletion strategy (see [Cascading Deletion](../operator-manual/cluster-bootstrapping.md#cascading-deletion))
### Example Scenarios
#### Scenario 1: Deleting a child application and all its resources
1. Navigate to the parent application's Resource Tree
2. Click the kebab menu (button with the three vertical dots) on a child Application resource
3. Select "Delete"
4. Choose **Foreground** or **Background** propagation policy
5. Confirm the deletion
**Result**: The child Application and all its managed resources (Deployments, Services, etc.) are deleted.
#### Scenario 2: Removing Argo CD management but keeping resources
1. Navigate to the Applications List or the parent application's Resource Tree
2. Click the kebab menu (button with the three vertical dots) on a child Application resource
3. Select "Delete"
4. Choose **Non-Cascading** propagation policy
5. Confirm the deletion
**Result**: Only the Application resource is deleted. All managed resources continue running in the cluster.
#### Scenario 3: Deleting from Resource Tree with context awareness
When you delete a child application from the Resource Tree view:
- Argo CD recognizes it as an Application resource (not just a generic Kubernetes resource)
- Shows "Delete child application" dialog if it detects the `app.kubernetes.io/part-of` label
- Displays an informational note explaining you're using the same behaviour as the Applications List
- Provides the same three propagation policy options
This ensures predictable and consistent deletion behaviour regardless of where you initiate the deletion.

View File

@@ -17,11 +17,12 @@ Adding the argocd.argoproj.io/hook annotation to a resource will assign it to a
## How phases work?
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following.
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following:
1. Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
2. Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
3. Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
Hooks marked with Skip will not be applied.
Here is a graphical overview of the sync process:
@@ -54,8 +55,9 @@ Argo CD also offers an alternative method of changing the sync order of resource
Hooks and resources are assigned to wave 0 by default. The wave can be negative, so you can create a wave that runs before all other resources.
When a sync operation takes place, Argo CD will:
Order all resources according to their wave (lowest to highest)
Apply the resources according to the resulting sequence
1. Order all resources according to their wave (lowest to highest)
2. Apply the resources according to the resulting sequence
There is currently a delay between each sync wave in order to give other controllers a chance to react to the spec change that was just applied. This also prevents Argo CD from assessing resource health too quickly (against the stale object), causing hooks to fire prematurely. The current delay between each sync wave is 2 seconds and can be configured via the environment variable ARGOCD_SYNC_WAVE_DELAY.
@@ -67,16 +69,16 @@ While you can use sync waves on their own, for maximum flexibility you can combi
When Argo CD starts a sync, it orders the resources in the following precedence:
The phase
The wave they are in (lower values first)
By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
By name
1. The phase
2. The wave they are in (lower values first)
3. By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
4. By name
Once the order is defined:
First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
It applies resources in that wave.
It repeats this process until all phases and waves are in-sync and healthy.
1. First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
2. It applies resources in that wave.
3. It repeats this process until all phases and waves are in-sync and healthy.
Because an application can have resources that are unhealthy in the first wave, it may be that the app can never get to healthy.

6
go.mod
View File

@@ -12,7 +12,7 @@ require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/TomOnTime/utfutil v1.0.0
github.com/alicebob/miniredis/v2 v2.35.0
github.com/argoproj/gitops-engine v0.7.1-0.20251006172252-b89b0871b414
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff
github.com/argoproj/pkg v0.13.6
github.com/argoproj/pkg/v2 v2.0.1
@@ -47,7 +47,7 @@ require (
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/golang/protobuf v1.5.4
github.com/google/btree v1.1.3
github.com/google/gnostic-models v0.7.0
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0
github.com/google/go-github/v69 v69.2.0
github.com/google/go-jsonnet v0.21.0
@@ -290,7 +290,7 @@ require (
k8s.io/controller-manager v0.34.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
k8s.io/kube-aggregator v0.34.0 // indirect
k8s.io/kubernetes v1.34.0 // indirect
k8s.io/kubernetes v1.34.2 // indirect
nhooyr.io/websocket v1.8.7 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/api v0.20.1 // indirect

8
go.sum
View File

@@ -113,8 +113,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
github.com/argoproj/gitops-engine v0.7.1-0.20251006172252-b89b0871b414 h1:2w1vd2VZja7Mlf/rblJkp6/Eq8fNDuM7p6pI4PTAJhg=
github.com/argoproj/gitops-engine v0.7.1-0.20251006172252-b89b0871b414/go.mod h1:2nqYZBhj8CfVZb3ATakZpi1KNb/yc7mpadIHslicTFI=
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d h1:iUJYrbSvpV9n8vyl1sBt1GceM60HhHfnHxuzcm5apDg=
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d/go.mod h1:PauXVUVcfiTgC+34lDdWzPS101g4NpsUtDAjFBnWf94=
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff h1:pGGAeHIktPuYCRl1Z540XdxPFnedqyUhJK4VgpyJZfY=
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
@@ -1434,8 +1434,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs=
k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4=
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.0
newTag: v3.2.2

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.0
newTag: v3.2.2
resources:
- ./application-controller
- ./dex

View File

@@ -239,6 +239,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
name: argocd-cmd-params-cm
key: reposerver.enable.builtin.git.config
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:

View File

@@ -24850,7 +24850,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -24985,7 +24985,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25113,7 +25113,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25386,6 +25386,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -25404,7 +25410,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25456,7 +25462,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25804,7 +25810,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -24818,7 +24818,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -24947,7 +24947,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25220,6 +25220,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -25238,7 +25244,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25290,7 +25296,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25638,7 +25644,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.0
newTag: v3.2.2

View File

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.0
newTag: v3.2.2
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

View File

@@ -26216,7 +26216,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -26351,7 +26351,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26502,7 +26502,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -26598,7 +26598,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -26722,7 +26722,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -27021,6 +27021,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -27039,7 +27045,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -27091,7 +27097,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -27465,7 +27471,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -27849,7 +27855,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -26186,7 +26186,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -26338,7 +26338,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -26434,7 +26434,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -26558,7 +26558,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -26857,6 +26857,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -26875,7 +26881,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26927,7 +26933,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -27301,7 +27307,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -27685,7 +27691,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1897,7 +1897,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2032,7 +2032,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2183,7 +2183,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2279,7 +2279,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2403,7 +2403,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2702,6 +2702,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -2720,7 +2726,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2772,7 +2778,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3146,7 +3152,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3530,7 +3536,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1867,7 +1867,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2019,7 +2019,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2115,7 +2115,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2239,7 +2239,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2538,6 +2538,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -2556,7 +2562,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2608,7 +2614,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2982,7 +2988,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3366,7 +3372,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -25294,7 +25294,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -25429,7 +25429,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25580,7 +25580,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -25676,7 +25676,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -25778,7 +25778,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -26051,6 +26051,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -26069,7 +26075,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26121,7 +26127,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26493,7 +26499,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -26877,7 +26883,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

22
manifests/install.yaml generated
View File

@@ -25262,7 +25262,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -25414,7 +25414,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -25510,7 +25510,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -25612,7 +25612,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25885,6 +25885,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -25903,7 +25909,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25955,7 +25961,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26327,7 +26333,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -26711,7 +26717,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -975,7 +975,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1110,7 +1110,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1261,7 +1261,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1357,7 +1357,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1459,7 +1459,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1732,6 +1732,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -1750,7 +1756,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1802,7 +1808,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2174,7 +2180,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2558,7 +2564,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -943,7 +943,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1095,7 +1095,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1191,7 +1191,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1293,7 +1293,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1566,6 +1566,12 @@ spec:
key: reposerver.git.request.timeout
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
valueFrom:
configMapKeyRef:
key: reposerver.enable.builtin.git.config
name: argocd-cmd-params-cm
optional: true
- name: ARGOCD_GRPC_MAX_SIZE_MB
valueFrom:
configMapKeyRef:
@@ -1584,7 +1590,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1636,7 +1642,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2008,7 +2014,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2392,7 +2398,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.0
image: quay.io/argoproj/argocd:v3.2.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -63,6 +63,7 @@ nav:
- operator-manual/web_based_terminal.md
- operator-manual/config-management-plugins.md
- operator-manual/deep_links.md
- operator-manual/git_configuration.md
- Notifications:
- Overview: operator-manual/notifications/index.md
- operator-manual/notifications/triggers.md

View File

@@ -117,6 +117,7 @@ type RepoServerInitConstants struct {
DisableHelmManifestMaxExtractedSize bool
IncludeHiddenDirectories bool
CMPUseManifestGeneratePaths bool
EnableBuiltinGitConfig bool
}
var manifestGenerateLock = sync.NewKeyLock()
@@ -2566,7 +2567,9 @@ func (s *Service) newClient(repo *v1alpha1.Repository, opts ...git.ClientOpts) (
if err != nil {
return nil, err
}
opts = append(opts, git.WithEventHandlers(metrics.NewGitClientEventHandlers(s.metricsServer)))
opts = append(opts,
git.WithEventHandlers(metrics.NewGitClientEventHandlers(s.metricsServer)),
git.WithBuiltinGitConfig(s.initConstants.EnableBuiltinGitConfig))
return s.newGitClient(repo.Repo, repoPath, repo.GetGitCreds(s.gitCredsStore), repo.IsInsecure(), repo.EnableLFS, repo.Proxy, repo.NoProxy, opts...)
}

View File

@@ -24,14 +24,23 @@ if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in pairs(obj.status.conditions) do
-- Check if the InferenceService is Stopped
if condition.type == "Stopped" and condition.status == "True" then
health_status.status = "Suspended"
health_status.message = "InferenceService is Stopped"
return health_status
end
-- Check for unhealthy statuses
-- Note: The Stopped condition's healthy status is False
if condition.status == "Unknown" then
status_unknown = status_unknown + 1
elseif condition.status == "False" then
elseif condition.status == "False" and condition.type ~= "Stopped" then
status_false = status_false + 1
end
if condition.status ~= "True" then
-- Add the error messages if the status is unhealthy
if condition.status ~= "True" and condition.type ~= "Stopped" then
msg = msg .. " | " .. i .. ": " .. condition.type .. " | " .. condition.status
if condition.reason ~= nil and condition.reason ~= "" then
msg = msg .. " | " .. condition.reason

View File

@@ -23,6 +23,10 @@ tests:
status: Degraded
message: "0: transitionStatus | BlockedByFailedLoad"
inputPath: testdata/degraded_modelmesh.yaml
- healthStatus:
status: Suspended
message: InferenceService is Stopped
inputPath: testdata/stopped.yaml
- healthStatus:
status: Healthy
message: InferenceService is healthy.

View File

@@ -23,3 +23,7 @@ status:
- lastTransitionTime: "2023-06-20T22:44:51Z"
status: "True"
type: Ready
- lastTransitionTime: "2023-06-20T22:44:51Z"
severity: Info
status: 'False'
type: Stopped

View File

@@ -31,5 +31,9 @@ status:
severity: Info
status: 'True'
type: RoutesReady
- lastTransitionTime: '2024-05-30T22:14:31Z'
severity: Info
status: 'False'
type: Stopped
modelStatus:
transitionStatus: UpToDate

View File

@@ -17,3 +17,7 @@ status:
- lastTransitionTime: '2024-05-16T18:48:56Z'
status: 'True'
type: Ready
- lastTransitionTime: '2024-05-16T18:48:56Z'
severity: Info
status: 'False'
type: Stopped

View File

@@ -0,0 +1,23 @@
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
name: helloworld
namespace: default
annotations:
serving.kserve.io/deploymentMode: RawDeployment
serving.kserve.io/stop: 'true'
spec: {}
status:
conditions:
- lastTransitionTime: '2024-05-16T18:48:56Z'
reason: Stopped
status: 'False'
type: PredictorReady
- lastTransitionTime: '2024-05-16T18:48:56Z'
reason: Stopped
status: 'False'
type: Ready
- lastTransitionTime: '2024-05-16T18:48:56Z'
severity: Info
status: 'True'
type: Stopped

View File

@@ -2407,7 +2407,7 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.
}
log.Warnf("failed to set operation for app %q due to update conflict. retrying again...", *termOpReq.Name)
time.Sleep(100 * time.Millisecond)
_, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
a, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error getting application by name: %w", err)
}

View File

@@ -29,6 +29,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
k8sbatchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -4030,3 +4031,75 @@ func TestServerSideDiff(t *testing.T) {
assert.Contains(t, err.Error(), "application")
})
}
// TestTerminateOperationWithConflicts tests that TerminateOperation properly handles
// concurrent update conflicts by retrying with the fresh application object.
//
// This test reproduces a bug where the retry loop discards the fresh app object
// fetched from Get(), causing all retries to fail with stale resource versions.
func TestTerminateOperationWithConflicts(t *testing.T) {
// Seed an application that has a running sync operation, so there is
// something for TerminateOperation to terminate.
testApp := newTestApp()
testApp.ResourceVersion = "1"
testApp.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
}
testApp.Status.OperationState = &v1alpha1.OperationState{
Operation: *testApp.Operation,
Phase: synccommon.OperationRunning,
}
appServer := newTestAppServer(t, testApp)
ctx := context.Background()
// Get the fake clientset from the deepCopy wrapper
fakeAppCs := appServer.appclientset.(*deepCopyAppClientset).GetUnderlyingClientSet().(*apps.Clientset)
getCallCount := 0
updateCallCount := 0
// Remove default reactors and add our custom ones
fakeAppCs.ReactionChain = nil
// Mock Get to return original version first, then fresh version
fakeAppCs.AddReactor("get", "applications", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
getCallCount++
freshApp := testApp.DeepCopy()
if getCallCount == 1 {
// First Get (for initialization) returns original version
freshApp.ResourceVersion = "1"
} else {
// Subsequent Gets (during retry) return fresh version
freshApp.ResourceVersion = "2"
}
return true, freshApp, nil
})
// Mock Update to return conflict on first call, success on second
fakeAppCs.AddReactor("update", "applications", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
updateCallCount++
updateAction := action.(kubetesting.UpdateAction)
app := updateAction.GetObject().(*v1alpha1.Application)
// First call (with original resource version): return conflict
if app.ResourceVersion == "1" {
return true, nil, apierrors.NewConflict(
schema.GroupResource{Group: "argoproj.io", Resource: "applications"},
app.Name,
stderrors.New("the object has been modified"),
)
}
// Second call (with refreshed resource version from Get): return success
// A resource version other than "1" proves the retry re-used the
// object fetched by the retry-loop Get rather than the stale one.
updatedApp := app.DeepCopy()
return true, updatedApp, nil
})
// Attempt to terminate the operation
_, err := appServer.TerminateOperation(ctx, &application.OperationTerminateRequest{
Name: ptr.To(testApp.Name),
})
// Should succeed after retrying with the fresh app
require.NoError(t, err)
// At least two Update calls means the conflict path was exercised and
// the retry (with the refreshed object) eventually succeeded.
assert.GreaterOrEqual(t, updateCallCount, 2, "Update should be called at least twice (once with conflict, once with success)")
}

View File

@@ -1220,9 +1220,12 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
terminalOpts := application.TerminalOptions{DisableAuth: server.DisableAuth, Enf: server.enf}
// SSO ClientApp
server.ssoClientApp, _ = oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
terminal := application.NewHandler(server.appLister, server.Namespace, server.ApplicationNamespaces, server.db, appResourceTreeFn, server.settings.ExecShells, server.sessionMgr, &terminalOpts).
WithFeatureFlagMiddleware(server.settingsMgr.GetSettings)
th := util_session.WithAuthMiddleware(server.DisableAuth, server.sessionMgr, terminal)
th := util_session.WithAuthMiddleware(server.DisableAuth, server.settings.IsSSOConfigured(), server.ssoClientApp, server.sessionMgr, terminal)
mux.Handle("/terminal", th)
// Proxy extension is currently an alpha feature and is disabled
@@ -1252,7 +1255,7 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
swagger.ServeSwaggerUI(mux, assets.SwaggerJSON, "/swagger-ui", server.RootPath)
healthz.ServeHealthCheck(mux, server.healthCheck)
// Dex reverse proxy and client app and OAuth2 login/callback
// Dex reverse proxy and OAuth2 login/callback
server.registerDexHandlers(mux)
// Webhook handler for git events (Note: cache timeouts are hardcoded because API server does not write to cache and not really using them)
@@ -1304,7 +1307,7 @@ func enforceContentTypes(handler http.Handler, types []string) http.Handler {
func registerExtensions(mux *http.ServeMux, a *ArgoCDServer, metricsReg HTTPMetricsRegistry) {
a.log.Info("Registering extensions...")
extHandler := http.HandlerFunc(a.extensionManager.CallExtension())
authMiddleware := a.sessionMgr.AuthMiddlewareFunc(a.DisableAuth)
authMiddleware := a.sessionMgr.AuthMiddlewareFunc(a.DisableAuth, a.settings.IsSSOConfigured(), a.ssoClientApp)
// auth middleware ensures that requests to all extensions are authenticated first
mux.Handle(extension.URLPrefix+"/", authMiddleware(extHandler))
@@ -1358,7 +1361,7 @@ func (server *ArgoCDServer) serveExtensions(extensionsSharedPath string, w http.
}
}
// registerDexHandlers will register dex HTTP handlers, creating the OAuth client app
// registerDexHandlers will register dex HTTP handlers
func (server *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
if !server.settings.IsSSOConfigured() {
return
@@ -1366,7 +1369,6 @@ func (server *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
// Run dex OpenID Connect Identity Provider behind a reverse proxy (served at /api/dex)
var err error
mux.HandleFunc(common.DexAPIEndpoint+"/", dexutil.NewDexHTTPReverseProxy(server.DexServerAddr, server.BaseHRef, server.DexTLSConfig))
server.ssoClientApp, err = oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
errorsutil.CheckError(err)
mux.HandleFunc(common.LoginEndpoint, server.ssoClientApp.HandleLogin)
mux.HandleFunc(common.CallbackEndpoint, server.ssoClientApp.HandleCallback)
@@ -1577,34 +1579,15 @@ func (server *ArgoCDServer) getClaims(ctx context.Context) (jwt.Claims, string,
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
}
// Some SSO implementations (Okta) require a call to
// the OIDC user info path to get attributes like groups
// we assume that everywhere in argocd jwt.MapClaims is used as type for interface jwt.Claims
// otherwise this would cause a panic
var groupClaims jwt.MapClaims
if groupClaims, ok = claims.(jwt.MapClaims); !ok {
if tmpClaims, ok := claims.(*jwt.MapClaims); ok {
groupClaims = *tmpClaims
}
}
iss := jwtutil.StringField(groupClaims, "iss")
if iss != util_session.SessionManagerClaimsIssuer && server.settings.UserInfoGroupsEnabled() && server.settings.UserInfoPath() != "" {
userInfo, unauthorized, err := server.ssoClientApp.GetUserInfo(groupClaims, server.settings.IssuerURL(), server.settings.UserInfoPath())
if unauthorized {
log.Errorf("error while quering userinfo endpoint: %v", err)
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session")
}
finalClaims := claims
if server.settings.IsSSOConfigured() {
finalClaims, err = server.ssoClientApp.SetGroupsFromUserInfo(claims, util_session.SessionManagerClaimsIssuer)
if err != nil {
log.Errorf("error fetching user info endpoint: %v", err)
return claims, "", status.Errorf(codes.Internal, "invalid userinfo response")
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
}
if groupClaims["sub"] != userInfo["sub"] {
return claims, "", status.Error(codes.Unknown, "subject of claims from user info endpoint didn't match subject of idToken, see https://openid.net/specs/openid-connect-core-1_0.html#UserInfo")
}
groupClaims["groups"] = userInfo["groups"]
}
return groupClaims, newToken, nil
return finalClaims, newToken, nil
}
// getToken extracts the token from gRPC metadata or cookie headers

View File

@@ -3078,6 +3078,12 @@ func TestDeletionConfirmation(t *testing.T) {
Then().Expect(OperationPhaseIs(OperationRunning)).
When().ConfirmDeletion().
Then().Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
// Wait for controller caches to fully settle before deletion
// This ensures both the informer and cluster watcher have the latest state
When().Refresh(RefreshTypeNormal).
Then().
When().Delete(true).
Then().
And(func(app *Application) {

View File

@@ -41,10 +41,10 @@ func TestSyncWithNoDestinationServiceAccountsInProject(t *testing.T) {
Given(t).
Path("guestbook").
When().
WithImpersonationEnabled("", nil).
CreateFromFile(func(app *v1alpha1.Application) {
app.Spec.SyncPolicy = &v1alpha1.SyncPolicy{Automated: &v1alpha1.SyncPolicyAutomated{}}
}).
WithImpersonationEnabled("", nil).
Then().
// With the impersonation feature enabled, Application sync must fail
// when there are no destination service accounts configured in AppProject

View File

@@ -1,8 +1,7 @@
import * as React from 'react';
import Moment from 'react-moment';
import {Pod, ResourceName} from '../../../shared/models';
import {isYoungerThanXMinutes} from '../utils';
import {formatMetric} from './pod-view';
import {isYoungerThanXMinutes, formatResourceInfo} from '../utils';
export const PodTooltip = (props: {pod: Pod}) => {
const pod = props.pod;
@@ -23,10 +22,20 @@ export const PodTooltip = (props: {pod: Pod}) => {
})
.map(i => {
const isPodRequests = i.name === ResourceName.ResourceCPU || i.name === ResourceName.ResourceMemory;
const formattedValue = isPodRequests ? formatMetric(i.name as ResourceName, parseInt(i.value, 10)) : i.value;
let formattedValue = i.value;
let label = `${i.name}:`;
//this is just to show cpu and mem info with "Requests" as prefix
const label = i.name === ResourceName.ResourceCPU ? 'Requests CPU:' : i.name === ResourceName.ResourceMemory ? 'Requests MEM:' : `${i.name}:`;
if (isPodRequests) {
if (i.name === ResourceName.ResourceCPU) {
const {displayValue} = formatResourceInfo('cpu', i.value);
formattedValue = displayValue;
label = 'Requests CPU:';
} else if (i.name === ResourceName.ResourceMemory) {
const {displayValue} = formatResourceInfo('memory', i.value);
formattedValue = displayValue;
label = 'Requests MEM:';
}
}
return (
<div className='row' key={i.name}>

View File

@@ -20,7 +20,7 @@
position: absolute;
color: #A3A3A3;
font-size: 10px;
top: -10px;
top: -9px;
transform: rotate(180deg);
}
}

View File

@@ -22,7 +22,8 @@ import {
NodeId,
nodeKey,
PodHealthIcon,
getUsrMsgKeyToDisplay
getUsrMsgKeyToDisplay,
formatResourceInfo
} from '../utils';
import {NodeUpdateAnimation} from './node-update-animation';
import {PodGroup} from '../application-pod-view/pod-view';
@@ -530,11 +531,19 @@ function renderPodGroup(props: ApplicationResourceTreeProps, id: string, node: R
<Tooltip
content={
<>
{(node.info || []).map(i => (
<div key={i.name}>
{i.name}: {i.value}
</div>
))}
{(node.info || []).map(i => {
// Use common formatting function for CPU and Memory
if (i.name === 'cpu' || i.name === 'memory') {
const {tooltipValue} = formatResourceInfo(i.name, `${i.value}`);
return <div key={i.name}>{tooltipValue}</div>;
} else {
return (
<div key={i.name}>
{i.name}: {i.value}
</div>
);
}
})}
</>
}
key={node.uid}>
@@ -662,9 +671,9 @@ function expandCollapse(node: ResourceTreeNode, props: ApplicationResourceTreePr
function NodeInfoDetails({tag: tag, kind: kind}: {tag: models.InfoItem; kind: string}) {
if (kind === 'Pod') {
const val = `${tag.name}`;
const val = tag.name;
if (val === 'Status Reason') {
if (`${tag.value}` !== 'ImagePullBackOff')
if (String(tag.value) !== 'ImagePullBackOff')
return (
<span className='application-resource-tree__node-label' title={`Status: ${tag.value}`}>
{tag.value}
@@ -680,10 +689,10 @@ function NodeInfoDetails({tag: tag, kind: kind}: {tag: models.InfoItem; kind: st
);
}
} else if (val === 'Containers') {
const arr = `${tag.value}`.split('/');
const arr = String(tag.value).split('/');
const title = `Number of containers in total: ${arr[1]} \nNumber of ready containers: ${arr[0]}`;
return (
<span className='application-resource-tree__node-label' title={`${title}`}>
<span className='application-resource-tree__node-label' title={title}>
{tag.value}
</span>
);
@@ -699,6 +708,14 @@ function NodeInfoDetails({tag: tag, kind: kind}: {tag: models.InfoItem; kind: st
{tag.value}
</span>
);
} else if (val === 'cpu' || val === 'memory') {
// Use common formatting function for CPU and Memory
const {displayValue, tooltipValue} = formatResourceInfo(val, String(tag.value));
return (
<span className='application-resource-tree__node-label' title={tooltipValue}>
{displayValue}
</span>
);
} else {
return (
<span className='application-resource-tree__node-label' title={`${tag.name}: ${tag.value}`}>
@@ -801,19 +818,27 @@ function renderResourceNode(props: ApplicationResourceTreeProps, id: string, nod
) : null}
{(node.info || [])
.filter(tag => !tag.name.includes('Node'))
.slice(0, 4)
.slice(0, 2)
.map((tag, i) => {
return <NodeInfoDetails tag={tag} kind={node.kind} key={i} />;
})}
{(node.info || []).length > 4 && (
{(node.info || []).length > 3 && (
<Tooltip
content={
<>
{(node.info || []).map(i => (
<div key={i.name}>
{i.name}: {i.value}
</div>
))}
{(node.info || []).map(i => {
// Use common formatting function for CPU and Memory
if (i.name === 'cpu' || i.name === 'memory') {
const {tooltipValue} = formatResourceInfo(i.name, `${i.value}`);
return <div key={i.name}>{tooltipValue}</div>;
} else {
return (
<div key={i.name}>
{i.name}: {i.value}
</div>
);
}
})}
</>
}
key={node.uid}>

View File

@@ -338,7 +338,7 @@ export const ApplicationStatusPanel = ({application, showDiff, showOperation, sh
}}>
{(data: models.ApplicationSyncWindowState) => (
<React.Fragment>
{data.assignedWindows && (
{data?.assignedWindows && (
<div className='application-status-panel__item' style={{position: 'relative'}}>
{sectionLabel({
title: 'SYNC WINDOWS',

View File

@@ -94,7 +94,7 @@ export const ApplicationsRefreshPanel = ({show, apps, hide}: {show: boolean; app
))}
</div>
</div>
<ApplicationSelector apps={apps} formApi={formApi} />
{show && <ApplicationSelector apps={apps} formApi={formApi} />}
</div>
)}
</Form>

View File

@@ -146,7 +146,7 @@ export const ApplicationsSyncPanel = ({show, apps, hide}: {show: boolean; apps:
<ApplicationRetryOptions id='applications-sync-panel' formApi={formApi} />
<ApplicationSelector apps={apps} formApi={formApi} />
{show && <ApplicationSelector apps={apps} formApi={formApi} />}
</div>
)}
</Form>

View File

@@ -1553,13 +1553,13 @@ export const SyncWindowStatusIcon = ({state, window}: {state: appModels.SyncWind
);
};
export const ApplicationSyncWindowStatusIcon = ({project, state}: {project: string; state: appModels.ApplicationSyncWindowState}) => {
export const ApplicationSyncWindowStatusIcon = ({project, state}: {project: string; state?: appModels.ApplicationSyncWindowState}) => {
let className = '';
let color = '';
let deny = false;
let allow = false;
let inactiveAllow = false;
if (state.assignedWindows !== undefined && state.assignedWindows.length > 0) {
if (state?.assignedWindows !== undefined && state?.assignedWindows.length > 0) {
if (state.activeWindows !== undefined && state.activeWindows.length > 0) {
for (const w of state.activeWindows) {
if (w.kind === 'deny') {
@@ -1815,3 +1815,43 @@ export const podRequests = {
CPU: 'Requests (CPU)',
MEMORY: 'Requests (MEM)'
} as const;
/**
 * Formats a pod resource request (cpu or memory) for display.
 *
 * Returns a short `displayValue` for inline labels and a longer
 * `tooltipValue` for hover text. Unrecognized resource names are
 * passed through unchanged.
 *
 * @param name  resource name ('cpu', 'memory', or anything else)
 * @param value raw numeric value as a string (parsed with parseInt)
 */
export function formatResourceInfo(name: string, value: string): {displayValue: string; tooltipValue: string} {
    const parsed = parseInt(value, 10);
    if (name === 'cpu') {
        // At or above 1000 millicores, show whole cores with one decimal
        // (no unit suffix in the short form); below that, keep the 'm' suffix.
        const short = parsed >= 1000 ? `${(parsed / 1000).toFixed(1)}` : `${parsed}m`;
        const long = parsed >= 1000 ? `${(parsed / 1000).toFixed(1)} cores` : `${parsed}m`;
        return {
            displayValue: short,
            tooltipValue: `CPU Request: ${long}`
        };
    }
    if (name === 'memory') {
        // Convert to Mi using the same divisor in both short and long forms.
        // NOTE(review): the divisor (1024 * 1024 * 1000) implies the input is
        // in "milli-bytes" — confirm against the backend's resource encoding.
        const mib = Math.round(parsed / (1024 * 1024 * 1000));
        return {
            displayValue: `${mib}Mi`,
            tooltipValue: `Memory Request: ${mib}Mi`
        };
    }
    // Fallback: echo the raw value with a "name: value" tooltip.
    return {
        displayValue: value,
        tooltipValue: `${name}: ${value}`
    };
}

View File

@@ -3,9 +3,7 @@ package db
import (
"context"
"math"
"strings"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -141,22 +139,6 @@ func NewDB(namespace string, settingsMgr *settings.SettingsManager, kubeclientse
}
}
func (db *db) getSecret(name string, cache map[string]*corev1.Secret) (*corev1.Secret, error) {
if _, ok := cache[name]; !ok {
secret, err := db.settingsMgr.GetSecretByName(name)
if err != nil {
return nil, err
}
cache[name] = secret
}
return cache[name], nil
}
// StripCRLFCharacter strips the trailing CRLF characters
func StripCRLFCharacter(input string) string {
return strings.TrimSpace(input)
}
// GetApplicationControllerReplicas gets the replicas of application controller
func (db *db) GetApplicationControllerReplicas() int {
// get the replicas from application controller deployment, if the application controller deployment does not exist, check for environment variable

View File

@@ -163,7 +163,7 @@ func TestCreateWriteRepoCredentials(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "https://github.com/argoproj/", creds.URL)
secret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), RepoURLToSecretName(credSecretPrefix, creds.URL, ""), metav1.GetOptions{})
secret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), RepoURLToSecretName(credWriteSecretPrefix, creds.URL, ""), metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, common.AnnotationValueManagedByArgoCD, secret.Annotations[common.AnnotationKeyManagedBy])

View File

@@ -19,8 +19,12 @@ import (
const (
// Prefix to use for naming repository secrets
repoSecretPrefix = "repo"
// Prefix to use for naming repository write secrets
repoWriteSecretPrefix = "repo-write"
// Prefix to use for naming credential template secrets
credSecretPrefix = "creds"
// Prefix to use for naming write credential template secrets
credWriteSecretPrefix = "creds-write"
// The name of the key storing the username in the secret
username = "username"
// The name of the key storing the password in the secret

View File

@@ -26,7 +26,11 @@ type secretsRepositoryBackend struct {
}
func (s *secretsRepositoryBackend) CreateRepository(ctx context.Context, repository *appsv1.Repository) (*appsv1.Repository, error) {
secName := RepoURLToSecretName(repoSecretPrefix, repository.Repo, repository.Project)
secretPrefix := repoSecretPrefix
if s.writeCreds {
secretPrefix = repoWriteSecretPrefix
}
secName := RepoURLToSecretName(secretPrefix, repository.Repo, repository.Project)
repositorySecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -60,12 +64,8 @@ func (s *secretsRepositoryBackend) CreateRepository(ctx context.Context, reposit
// the label is found and false otherwise. Will return false if no secret is found with the given
// name.
func (s *secretsRepositoryBackend) hasRepoTypeLabel(secretName string) (bool, error) {
noCache := make(map[string]*corev1.Secret)
sec, err := s.db.getSecret(secretName, noCache)
sec, err := s.db.settingsMgr.GetSecretByName(secretName)
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
_, ok := sec.GetLabels()[common.LabelKeySecretType]
@@ -76,7 +76,7 @@ func (s *secretsRepositoryBackend) hasRepoTypeLabel(secretName string) (bool, er
}
func (s *secretsRepositoryBackend) GetRepoCredsBySecretName(_ context.Context, name string) (*appsv1.RepoCreds, error) {
secret, err := s.db.getSecret(name, map[string]*corev1.Secret{})
secret, err := s.db.settingsMgr.GetSecretByName(name)
if err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", name, err)
}
@@ -179,7 +179,11 @@ func (s *secretsRepositoryBackend) RepositoryExists(_ context.Context, repoURL,
}
func (s *secretsRepositoryBackend) CreateRepoCreds(ctx context.Context, repoCreds *appsv1.RepoCreds) (*appsv1.RepoCreds, error) {
secName := RepoURLToSecretName(credSecretPrefix, repoCreds.URL, "")
secretPrefix := credSecretPrefix
if s.writeCreds {
secretPrefix = credWriteSecretPrefix
}
secName := RepoURLToSecretName(secretPrefix, repoCreds.URL, "")
repoCredsSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -400,6 +404,8 @@ func secretToRepository(secret *corev1.Secret) (*appsv1.Repository, error) {
return repository, nil
}
// repositoryToSecret updates the given secret with the data from the repository object. It adds the appropriate
// labels/annotations, but it does not add any name or namespace metadata.
func (s *secretsRepositoryBackend) repositoryToSecret(repository *appsv1.Repository, secret *corev1.Secret) *corev1.Secret {
secretCopy := secret.DeepCopy()

View File

@@ -11,13 +11,9 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
"github.com/argoproj/argo-cd/v3/common"
appsv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
@@ -39,8 +35,8 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
InsecureIgnoreHostKey: false,
EnableLFS: true,
}
setupWithK8sObjects := func(objects ...runtime.Object) *fixture {
clientset := getClientset(objects...)
setup := func() *fixture {
clientset := getClientset()
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
repoBackend := &secretsRepositoryBackend{db: &db{
ns: testNamespace,
@@ -55,7 +51,7 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
t.Run("will create repository successfully", func(t *testing.T) {
// given
t.Parallel()
f := setupWithK8sObjects()
f := setup()
// when
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
@@ -85,21 +81,26 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
t.Run("will return proper error if secret does not have expected label", func(t *testing.T) {
// given
t.Parallel()
secret := &corev1.Secret{}
s := secretsRepositoryBackend{}
updatedSecret := s.repositoryToSecret(repo, secret)
delete(updatedSecret.Labels, common.LabelKeySecretType)
f := setupWithK8sObjects(updatedSecret)
f.clientSet.ReactionChain = nil
f.clientSet.AddReactor("create", "secrets", func(_ k8stesting.Action) (handled bool, ret runtime.Object, err error) {
gr := schema.GroupResource{
Group: "v1",
Resource: "secrets",
}
return true, nil, apierrors.NewAlreadyExists(gr, "already exists")
})
f := setup()
// when
_, err := f.repoBackend.CreateRepository(t.Context(), repo)
// then
require.NoError(t, err)
// given - remove the label from the secret
secret, err := f.clientSet.CoreV1().Secrets(testNamespace).Get(
t.Context(),
RepoURLToSecretName(repoSecretPrefix, repo.Repo, ""),
metav1.GetOptions{},
)
require.NoError(t, err)
delete(secret.Labels, common.LabelKeySecretType)
_, err = f.clientSet.CoreV1().Secrets(testNamespace).Update(t.Context(), secret, metav1.UpdateOptions{})
require.NoError(t, err)
// when - try to create the same repository again
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
// then
@@ -107,41 +108,20 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
assert.Nil(t, output)
status, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, codes.InvalidArgument, status.Code())
assert.Equal(t, codes.InvalidArgument, status.Code(), "got unexpected error: %v", err)
})
t.Run("will return proper error if secret already exists", func(t *testing.T) {
t.Run("will return proper error if secret already exists and does have the proper label", func(t *testing.T) {
// given
t.Parallel()
secName := RepoURLToSecretName(repoSecretPrefix, repo.Repo, "")
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: secName,
Namespace: "default",
},
}
s := secretsRepositoryBackend{}
updatedSecret := s.repositoryToSecret(repo, secret)
f := setupWithK8sObjects(updatedSecret)
f.clientSet.ReactionChain = nil
f.clientSet.WatchReactionChain = nil
f.clientSet.AddReactor("create", "secrets", func(_ k8stesting.Action) (handled bool, ret runtime.Object, err error) {
gr := schema.GroupResource{
Group: "v1",
Resource: "secrets",
}
return true, nil, apierrors.NewAlreadyExists(gr, "already exists")
})
watcher := watch.NewFakeWithChanSize(1, true)
watcher.Add(updatedSecret)
f.clientSet.AddWatchReactor("secrets", func(_ k8stesting.Action) (handled bool, ret watch.Interface, err error) {
return true, watcher, nil
})
f := setup()
// when
_, err := f.repoBackend.CreateRepository(t.Context(), repo)
// then
require.NoError(t, err)
// when - try to create the same repository again
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
// then
@@ -149,7 +129,7 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
assert.Nil(t, output)
status, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, codes.AlreadyExists, status.Code())
assert.Equal(t, codes.AlreadyExists, status.Code(), "got unexpected error: %v", err)
})
}
@@ -1180,3 +1160,86 @@ func TestRaceConditionInRepositoryOperations(t *testing.T) {
assert.Equal(t, repo.Username, finalRepo.Username)
assert.Equal(t, repo.Password, finalRepo.Password)
}
// TestCreateReadAndWriteSecretForSameURL verifies that a read backend
// (writeCreds: false) and a write backend (writeCreds: true) can each create
// a repository secret for the same repo URL without colliding: the two
// secrets get distinct name prefixes and distinct secret-type labels.
func TestCreateReadAndWriteSecretForSameURL(t *testing.T) {
clientset := getClientset()
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
repo := &appsv1.Repository{
Name: "TestRepo",
Repo: "git@github.com:argoproj/argo-cd.git",
Username: "user",
Password: "pass",
}
// Create read secret
readBackend := &secretsRepositoryBackend{db: &db{
ns: testNamespace,
kubeclientset: clientset,
settingsMgr: settingsMgr,
}, writeCreds: false}
_, err := readBackend.CreateRepository(t.Context(), repo)
require.NoError(t, err)
// Create write secret
writeBackend := &secretsRepositoryBackend{db: &db{
ns: testNamespace,
kubeclientset: clientset,
settingsMgr: settingsMgr,
}, writeCreds: true}
_, err = writeBackend.CreateRepository(t.Context(), repo)
require.NoError(t, err)
// Assert both secrets exist
// The read secret is named with repoSecretPrefix, the write secret with
// repoWriteSecretPrefix — both derived from the same URL.
readSecretName := RepoURLToSecretName(repoSecretPrefix, repo.Repo, repo.Project)
writeSecretName := RepoURLToSecretName(repoWriteSecretPrefix, repo.Repo, repo.Project)
readSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), readSecretName, metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, common.LabelValueSecretTypeRepository, readSecret.Labels[common.LabelKeySecretType])
writeSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), writeSecretName, metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, common.LabelValueSecretTypeRepositoryWrite, writeSecret.Labels[common.LabelKeySecretType])
}
// TestCreateReadAndWriteRepoCredsSecretForSameURL verifies that read and write
// credential-template backends can both create a creds secret for the same
// URL: the secrets use distinct name prefixes (credSecretPrefix vs
// credWriteSecretPrefix) and distinct secret-type labels, so neither
// overwrites the other.
func TestCreateReadAndWriteRepoCredsSecretForSameURL(t *testing.T) {
clientset := getClientset()
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
creds := &appsv1.RepoCreds{
URL: "git@github.com:argoproj/argo-cd.git",
Username: "user",
Password: "pass",
}
// Create read creds secret
readBackend := &secretsRepositoryBackend{db: &db{
ns: testNamespace,
kubeclientset: clientset,
settingsMgr: settingsMgr,
}, writeCreds: false}
_, err := readBackend.CreateRepoCreds(t.Context(), creds)
require.NoError(t, err)
// Create write creds secret
writeBackend := &secretsRepositoryBackend{db: &db{
ns: testNamespace,
kubeclientset: clientset,
settingsMgr: settingsMgr,
}, writeCreds: true}
_, err = writeBackend.CreateRepoCreds(t.Context(), creds)
require.NoError(t, err)
// Assert both secrets exist
// Both names derive from the same URL; only the prefix differs.
readSecretName := RepoURLToSecretName(credSecretPrefix, creds.URL, "")
writeSecretName := RepoURLToSecretName(credWriteSecretPrefix, creds.URL, "")
readSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), readSecretName, metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, common.LabelValueSecretTypeRepoCreds, readSecret.Labels[common.LabelKeySecretType])
writeSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), writeSecretName, metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, common.LabelValueSecretTypeRepoCredsWrite, writeSecret.Labels[common.LabelKeySecretType])
}

View File

@@ -45,6 +45,18 @@ import (
var ErrInvalidRepoURL = errors.New("repo URL is invalid")
// builtinGitConfig configuration contains statements that are needed
// for correct ArgoCD operation. These settings will override any
// user-provided configuration of same options.
var builtinGitConfig = map[string]string{
"maintenance.autoDetach": "false",
"gc.autoDetach": "false",
}
// BuiltinGitConfigEnv contains builtin git configuration in the
// format acceptable by Git.
var BuiltinGitConfigEnv []string
// CommitMetadata contains metadata about a commit that is related in some way to another commit.
type CommitMetadata struct {
// Author is the author of the commit.
@@ -162,6 +174,8 @@ type nativeGitClient struct {
proxy string
// list of targets that shouldn't use the proxy, applies only if the proxy is set
noProxy string
// git configuration environment variables
gitConfigEnv []string
}
type runOpts struct {
@@ -188,6 +202,14 @@ func init() {
maxRetryDuration = env.ParseDurationFromEnv(common.EnvGitRetryMaxDuration, common.DefaultGitRetryMaxDuration, 0, math.MaxInt64)
retryDuration = env.ParseDurationFromEnv(common.EnvGitRetryDuration, common.DefaultGitRetryDuration, 0, math.MaxInt64)
factor = env.ParseInt64FromEnv(common.EnvGitRetryFactor, common.DefaultGitRetryFactor, 0, math.MaxInt64)
BuiltinGitConfigEnv = append(BuiltinGitConfigEnv, fmt.Sprintf("GIT_CONFIG_COUNT=%d", len(builtinGitConfig)))
idx := 0
for k, v := range builtinGitConfig {
BuiltinGitConfigEnv = append(BuiltinGitConfigEnv, fmt.Sprintf("GIT_CONFIG_KEY_%d=%s", idx, k))
BuiltinGitConfigEnv = append(BuiltinGitConfigEnv, fmt.Sprintf("GIT_CONFIG_VALUE_%d=%s", idx, v))
idx++
}
}
type ClientOpts func(c *nativeGitClient)
@@ -200,6 +222,16 @@ func WithCache(cache gitRefCache, loadRefFromCache bool) ClientOpts {
}
}
func WithBuiltinGitConfig(enable bool) ClientOpts {
return func(c *nativeGitClient) {
if enable {
c.gitConfigEnv = BuiltinGitConfigEnv
} else {
c.gitConfigEnv = nil
}
}
}
// WithEventHandlers sets the git client event handlers
func WithEventHandlers(handlers EventHandlers) ClientOpts {
return func(c *nativeGitClient) {
@@ -222,13 +254,14 @@ func NewClient(rawRepoURL string, creds Creds, insecure bool, enableLfs bool, pr
func NewClientExt(rawRepoURL string, root string, creds Creds, insecure bool, enableLfs bool, proxy string, noProxy string, opts ...ClientOpts) (Client, error) {
client := &nativeGitClient{
repoURL: rawRepoURL,
root: root,
creds: creds,
insecure: insecure,
enableLfs: enableLfs,
proxy: proxy,
noProxy: noProxy,
repoURL: rawRepoURL,
root: root,
creds: creds,
insecure: insecure,
enableLfs: enableLfs,
proxy: proxy,
noProxy: noProxy,
gitConfigEnv: BuiltinGitConfigEnv,
}
for i := range opts {
opts[i](client)
@@ -1111,6 +1144,8 @@ func (m *nativeGitClient) runCmdOutput(cmd *exec.Cmd, ropts runOpts) (string, er
cmd.Env = append(cmd.Env, "GIT_LFS_SKIP_SMUDGE=1")
// Disable Git terminal prompts in case we're running with a tty
cmd.Env = append(cmd.Env, "GIT_TERMINAL_PROMPT=false")
// Add Git configuration options that are essential for ArgoCD operation
cmd.Env = append(cmd.Env, m.gitConfigEnv...)
// For HTTPS repositories, we need to consider insecure repositories as well
// as custom CA bundles from the cert database.

View File

@@ -10,6 +10,7 @@ import (
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"testing"
@@ -1210,3 +1211,53 @@ Argocd-reference-commit-repourl: https://github.com/another/repo.git`,
})
}
}
// Test_BuiltinConfig verifies that every builtin git configuration option is
// visible to git (sourced from the command line) when WithBuiltinGitConfig is
// enabled, and absent when it is disabled.
func Test_BuiltinConfig(t *testing.T) {
	tempDir := t.TempDir()
	for _, enabled := range []bool{false, true} {
		client, err := NewClientExt("file://"+tempDir, tempDir, NopCreds{}, true, false, "", "", WithBuiltinGitConfig(enabled))
		require.NoError(t, err)
		native := client.(*nativeGitClient)
		configOut, err := native.config("--list", "--show-origin")
		require.NoError(t, err)
		for k, v := range builtinGitConfig {
			// Quote the key as well as the value: config keys contain regex
			// metacharacters such as '.' which would otherwise match any byte.
			r := regexp.MustCompile(fmt.Sprintf(`(?m)^command line:\s+%s=%s$`, regexp.QuoteMeta(strings.ToLower(k)), regexp.QuoteMeta(v)))
			matches := r.FindString(configOut)
			if enabled {
				assert.NotEmpty(t, matches, "missing builtin configuration option: %s=%s", k, v)
			} else {
				assert.Empty(t, matches, "unexpected builtin configuration when builtin config is disabled: %s=%s", k, v)
			}
		}
	}
}
// Test_GitNoDetachedMaintenance verifies that git auto-maintenance triggered
// by a fetch runs without --detach, i.e. that the builtin configuration
// (maintenance.autoDetach=false) is effective.
func Test_GitNoDetachedMaintenance(t *testing.T) {
	tempDir := t.TempDir()
	ctx := t.Context()

	client, err := NewClientExt("file://"+tempDir, tempDir, NopCreds{}, true, false, "", "")
	require.NoError(t, err)
	native := client.(*nativeGitClient)
	err = client.Init()
	require.NoError(t, err)

	cmd := exec.CommandContext(ctx, "git", "fetch")
	// trace execution of Git subcommands and their arguments to stderr
	cmd.Env = append(cmd.Env, "GIT_TRACE=true")
	// Ignore system config in case it disables auto maintenance
	cmd.Env = append(cmd.Env, "GIT_CONFIG_NOSYSTEM=true")
	output, err := native.runCmdOutput(cmd, runOpts{CaptureStderr: true})
	require.NoError(t, err)

	lines := strings.Split(output, "\n")
	for _, line := range lines {
		if strings.Contains(line, "git maintenance run") {
			// Inspect the maintenance invocation line itself, not the whole
			// trace output, so an unrelated "--detach" elsewhere in the trace
			// cannot affect the result.
			assert.NotContains(t, line, "--detach", "Unexpected --detach when running git maintenance")
			return
		}
	}
	assert.Fail(t, "Expected to see `git maintenance` run after `git fetch`")
}

View File

@@ -493,7 +493,7 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
}
sub := jwtutil.StringField(claims, "sub")
err = a.clientCache.Set(&cache.Item{
Key: formatAccessTokenCacheKey(sub),
Key: FormatAccessTokenCacheKey(sub),
Object: encToken,
CacheActionOpts: cache.CacheActionOpts{
Expiration: getTokenExpiration(claims),
@@ -640,6 +640,39 @@ func createClaimsAuthenticationRequestParameter(requestedClaims map[string]*oidc
return oauth2.SetAuthURLParam("claims", string(claimsRequestRAW)), nil
}
// SetGroupsFromUserInfo takes a claims object and replaces its "groups" claim
// with the one obtained from the OIDC userinfo endpoint, when that feature is
// enabled in the settings. This is required by some SSO implementations as
// they don't provide the groups claim in the ID token.
// If querying the userinfo endpoint fails, an error is returned to indicate
// the session is invalid. Claims issued by Argo CD itself
// (sessionManagerClaimsIssuer) are returned unchanged.
// We assume that everywhere in argocd jwt.MapClaims (or *jwt.MapClaims) is
// used as the concrete type behind jwt.Claims; otherwise this would cause a
// panic, so other types degrade to nil claims here.
func (a *ClientApp) SetGroupsFromUserInfo(claims jwt.Claims, sessionManagerClaimsIssuer string) (jwt.MapClaims, error) {
	var groupClaims jwt.MapClaims
	var ok bool
	if groupClaims, ok = claims.(jwt.MapClaims); !ok {
		if tmpClaims, ok := claims.(*jwt.MapClaims); ok {
			if tmpClaims != nil {
				groupClaims = *tmpClaims
			}
		}
	}
	iss := jwtutil.StringField(groupClaims, "iss")
	if iss != sessionManagerClaimsIssuer && a.settings.UserInfoGroupsEnabled() && a.settings.UserInfoPath() != "" {
		userInfo, unauthorized, err := a.GetUserInfo(groupClaims, a.settings.IssuerURL(), a.settings.UserInfoPath())
		if unauthorized {
			return groupClaims, fmt.Errorf("error while querying userinfo endpoint: %w", err)
		}
		if err != nil {
			return groupClaims, fmt.Errorf("error fetching user info endpoint: %w", err)
		}
		// The userinfo response must belong to the same subject as the ID
		// token, per the OIDC core spec.
		if groupClaims["sub"] != userInfo["sub"] {
			return groupClaims, errors.New("subject of claims from user info endpoint didn't match subject of idToken, see https://openid.net/specs/openid-connect-core-1_0.html#UserInfo")
		}
		// Groups from userinfo intentionally override any groups already in
		// the token, which may have been truncated (HTTP header size limits).
		groupClaims["groups"] = userInfo["groups"]
	}
	return groupClaims, nil
}
// GetUserInfo queries the IDP userinfo endpoint for claims
func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoPath string) (jwt.MapClaims, bool, error) {
sub := jwtutil.StringField(actualClaims, "sub")
@@ -647,7 +680,7 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
var encClaims []byte
// in case we got it in the cache, we just return the item
clientCacheKey := formatUserInfoResponseCacheKey(sub)
clientCacheKey := FormatUserInfoResponseCacheKey(sub)
if err := a.clientCache.Get(clientCacheKey, &encClaims); err == nil {
claimsRaw, err := crypto.Decrypt(encClaims, a.encryptionKey)
if err != nil {
@@ -664,7 +697,7 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
// check if the accessToken for the user is still present
var encAccessToken []byte
err := a.clientCache.Get(formatAccessTokenCacheKey(sub), &encAccessToken)
err := a.clientCache.Get(FormatAccessTokenCacheKey(sub), &encAccessToken)
// without an accessToken we can't query the user info endpoint
// thus the user needs to reauthenticate for argocd to get a new accessToken
if errors.Is(err, cache.ErrCacheMiss) {
@@ -774,11 +807,11 @@ func getTokenExpiration(claims jwt.MapClaims) time.Duration {
}
// formatUserInfoResponseCacheKey returns the key which is used to store userinfo of user in cache
func formatUserInfoResponseCacheKey(sub string) string {
func FormatUserInfoResponseCacheKey(sub string) string {
return fmt.Sprintf("%s_%s", UserInfoResponseCachePrefix, sub)
}
// formatAccessTokenCacheKey returns the key which is used to store the accessToken of a user in cache
func formatAccessTokenCacheKey(sub string) string {
func FormatAccessTokenCacheKey(sub string) string {
return fmt.Sprintf("%s_%s", AccessTokenCachePrefix, sub)
}

View File

@@ -943,7 +943,7 @@ func TestGetUserInfo(t *testing.T) {
expectError bool
}{
{
key: formatUserInfoResponseCacheKey("randomUser"),
key: FormatUserInfoResponseCacheKey("randomUser"),
expectError: true,
},
},
@@ -958,7 +958,7 @@ func TestGetUserInfo(t *testing.T) {
encrypt bool
}{
{
key: formatAccessTokenCacheKey("randomUser"),
key: FormatAccessTokenCacheKey("randomUser"),
value: "FakeAccessToken",
encrypt: true,
},
@@ -977,7 +977,7 @@ func TestGetUserInfo(t *testing.T) {
expectError bool
}{
{
key: formatUserInfoResponseCacheKey("randomUser"),
key: FormatUserInfoResponseCacheKey("randomUser"),
expectError: true,
},
},
@@ -992,7 +992,7 @@ func TestGetUserInfo(t *testing.T) {
encrypt bool
}{
{
key: formatAccessTokenCacheKey("randomUser"),
key: FormatAccessTokenCacheKey("randomUser"),
value: "FakeAccessToken",
encrypt: true,
},
@@ -1011,7 +1011,7 @@ func TestGetUserInfo(t *testing.T) {
expectError bool
}{
{
key: formatUserInfoResponseCacheKey("randomUser"),
key: FormatUserInfoResponseCacheKey("randomUser"),
expectError: true,
},
},
@@ -1034,7 +1034,7 @@ func TestGetUserInfo(t *testing.T) {
encrypt bool
}{
{
key: formatAccessTokenCacheKey("randomUser"),
key: FormatAccessTokenCacheKey("randomUser"),
value: "FakeAccessToken",
encrypt: true,
},
@@ -1053,7 +1053,7 @@ func TestGetUserInfo(t *testing.T) {
expectError bool
}{
{
key: formatUserInfoResponseCacheKey("randomUser"),
key: FormatUserInfoResponseCacheKey("randomUser"),
expectError: true,
},
},
@@ -1086,7 +1086,7 @@ func TestGetUserInfo(t *testing.T) {
expectError bool
}{
{
key: formatUserInfoResponseCacheKey("randomUser"),
key: FormatUserInfoResponseCacheKey("randomUser"),
value: "{\"groups\":[\"githubOrg:engineers\"]}",
expectEncrypted: true,
expectError: false,
@@ -1113,7 +1113,7 @@ func TestGetUserInfo(t *testing.T) {
encrypt bool
}{
{
key: formatAccessTokenCacheKey("randomUser"),
key: FormatAccessTokenCacheKey("randomUser"),
value: "FakeAccessToken",
encrypt: true,
},
@@ -1172,3 +1172,94 @@ func TestGetUserInfo(t *testing.T) {
})
}
}
// TestSetGroupsFromUserInfo exercises SetGroupsFromUserInfo against a
// prepopulated in-memory userinfo cache (instead of a live endpoint),
// covering enrichment, subject mismatch, group override, and the
// unreachable-endpoint failure mode.
func TestSetGroupsFromUserInfo(t *testing.T) {
	tests := []struct {
		name           string
		inputClaims    jwt.MapClaims // function input
		cacheClaims    jwt.MapClaims // userinfo response
		expectedClaims jwt.MapClaims // function output
		expectError    bool
	}{
		{
			name:           "set correct groups from userinfo endpoint", // enriches the JWT claims with information from the userinfo endpoint, default case
			inputClaims:    jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			cacheClaims:    jwt.MapClaims{"sub": "randomUser", "groups": []string{"githubOrg:example"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []any{"githubOrg:example"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())}, // the groups must be of type any since the response we get was parsed by GetUserInfo and we don't yet know the type of the groups claim
			expectError:    false,
		},
		{
			name:           "return error for wrong userinfo claims returned", // when there's an error in this feature, the claims should be untouched for the rest to still proceed
			inputClaims:    jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			cacheClaims:    jwt.MapClaims{"sub": "wrongUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			expectedClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			expectError:    true,
		},
		{
			name:           "override groups already defined in input claims", // this is expected behavior since input claims might have been truncated (HTTP header 4K limit)
			inputClaims:    jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			cacheClaims:    jwt.MapClaims{"sub": "randomUser", "groups": []string{"superusers", "usergroup", "support-group"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []any{"superusers", "usergroup", "support-group"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			expectError:    false,
		},
		{
			name:           "empty cache and non-rechable userinfo endpoint", // this will try to reach the (unreachable) userinfo endpoint defined in the test and fail
			inputClaims:    jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			cacheClaims:    nil, // the test doesn't set the cache for an empty object
			expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
			expectError:    true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// create the ClientApp
			userInfoCache := cache.NewInMemoryCache(24 * time.Hour)
			signature, err := util.MakeSignature(32)
			require.NoError(t, err, "failed creating signature for settings object")
			cdSettings := &settings.ArgoCDSettings{
				ServerSignature: signature,
				OIDCConfigRAW: `
issuer: http://localhost:63231
enableUserInfoGroups: true
userInfoPath: /`,
			}
			a, err := NewClientApp(cdSettings, "", nil, "/argo-cd", userInfoCache)
			require.NoError(t, err, "failed creating clientapp")

			// prepopulate cache to predict what the GetUserInfo function will return to the SetGroupsFromUserInfo function (without having to mock the userinfo response)
			encryptionKey, err := cdSettings.GetServerEncryptionKey()
			require.NoError(t, err, "failed obtaining encryption key from settings")
			// set fake accessToken for function to not return early
			encAccessToken, err := crypto.Encrypt([]byte("123456"), encryptionKey)
			require.NoError(t, err, "failed encrypting dummy access token")
			err = a.clientCache.Set(&cache.Item{
				Key:    FormatAccessTokenCacheKey("randomUser"),
				Object: encAccessToken,
			})
			require.NoError(t, err, "failed setting item to in-memory cache")
			// set cacheClaims to in-memory cache to let GetUserInfo return early with this information (GetUserInfo has a separate test, here we focus on SetUserInfoGroups)
			if tt.cacheClaims != nil {
				cacheClaims, err := json.Marshal(tt.cacheClaims)
				require.NoError(t, err)
				encCacheClaims, err := crypto.Encrypt([]byte(cacheClaims), encryptionKey)
				require.NoError(t, err, "failed encrypting dummy access token")
				err = a.clientCache.Set(&cache.Item{
					Key:    FormatUserInfoResponseCacheKey("randomUser"),
					Object: encCacheClaims,
				})
				require.NoError(t, err, "failed setting item to in-memory cache")
			}

			receivedClaims, err := a.SetGroupsFromUserInfo(tt.inputClaims, "argocd")
			if tt.expectError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			assert.Equal(t, tt.expectedClaims, receivedClaims) // check that the claims were successfully enriched with what we expect
		})
	}
}

View File

@@ -480,9 +480,9 @@ func (mgr *SessionManager) VerifyUsernamePassword(username string, password stri
// AuthMiddlewareFunc returns a function that can be used as an
// authentication middleware for HTTP requests.
func (mgr *SessionManager) AuthMiddlewareFunc(disabled bool) func(http.Handler) http.Handler {
func (mgr *SessionManager) AuthMiddlewareFunc(disabled bool, isSSOConfigured bool, ssoClientApp *oidcutil.ClientApp) func(http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return WithAuthMiddleware(disabled, mgr, h)
return WithAuthMiddleware(disabled, isSSOConfigured, ssoClientApp, mgr, h)
}
}
@@ -495,26 +495,41 @@ type TokenVerifier interface {
// WithAuthMiddleware is an HTTP middleware used to ensure incoming
// requests are authenticated before invoking the target handler. If
// disabled is true, it will just invoke the next handler in the chain.
func WithAuthMiddleware(disabled bool, authn TokenVerifier, next http.Handler) http.Handler {
func WithAuthMiddleware(disabled bool, isSSOConfigured bool, ssoClientApp *oidcutil.ClientApp, authn TokenVerifier, next http.Handler) http.Handler {
if disabled {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !disabled {
cookies := r.Cookies()
tokenString, err := httputil.JoinCookies(common.AuthCookieName, cookies)
if err != nil {
http.Error(w, "Auth cookie not found", http.StatusBadRequest)
return
}
claims, _, err := authn.VerifyToken(tokenString)
if err != nil {
http.Error(w, "Invalid token", http.StatusUnauthorized)
return
}
ctx := r.Context()
// Add claims to the context to inspect for RBAC
//nolint:staticcheck
ctx = context.WithValue(ctx, "claims", claims)
r = r.WithContext(ctx)
cookies := r.Cookies()
tokenString, err := httputil.JoinCookies(common.AuthCookieName, cookies)
if err != nil {
http.Error(w, "Auth cookie not found", http.StatusBadRequest)
return
}
claims, _, err := authn.VerifyToken(tokenString)
if err != nil {
http.Error(w, "Invalid token", http.StatusUnauthorized)
return
}
finalClaims := claims
if isSSOConfigured {
finalClaims, err = ssoClientApp.SetGroupsFromUserInfo(claims, SessionManagerClaimsIssuer)
if err != nil {
http.Error(w, "Invalid session", http.StatusUnauthorized)
return
}
}
ctx := r.Context()
// Add claims to the context to inspect for RBAC
//nolint:staticcheck
ctx = context.WithValue(ctx, "claims", finalClaims)
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
})
}

View File

@@ -2,6 +2,7 @@ package session
import (
"context"
"encoding/json"
"encoding/pem"
stderrors "errors"
"fmt"
@@ -29,7 +30,11 @@ import (
apps "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned/fake"
"github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/test"
"github.com/argoproj/argo-cd/v3/util"
"github.com/argoproj/argo-cd/v3/util/cache"
"github.com/argoproj/argo-cd/v3/util/crypto"
jwtutil "github.com/argoproj/argo-cd/v3/util/jwt"
"github.com/argoproj/argo-cd/v3/util/oidc"
"github.com/argoproj/argo-cd/v3/util/password"
"github.com/argoproj/argo-cd/v3/util/settings"
utiltest "github.com/argoproj/argo-cd/v3/util/test"
@@ -236,20 +241,39 @@ func strPointer(str string) *string {
func TestSessionManager_WithAuthMiddleware(t *testing.T) {
handlerFunc := func() func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, _ *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
t.Helper()
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/text")
_, err := w.Write([]byte("Ok"))
require.NoError(t, err, "error writing response: %s", err)
contextClaims := r.Context().Value("claims")
if contextClaims != nil {
var gotClaims jwt.MapClaims
var ok bool
if gotClaims, ok = contextClaims.(jwt.MapClaims); !ok {
if tmpClaims, ok := contextClaims.(*jwt.MapClaims); ok && tmpClaims != nil {
gotClaims = *tmpClaims
}
}
jsonClaims, err := json.Marshal(gotClaims)
require.NoError(t, err, "erorr marshalling claims set by AuthMiddleware")
w.Header().Set("Content-Type", "application/json")
_, err = w.Write(jsonClaims)
require.NoError(t, err, "error writing response: %s", err)
} else {
w.Header().Set("Content-Type", "application/text")
_, err := w.Write([]byte("Ok"))
require.NoError(t, err, "error writing response: %s", err)
}
}
}
type testCase struct {
name string
authDisabled bool
ssoEnabled bool
cookieHeader bool
verifiedClaims *jwt.RegisteredClaims
verifiedClaims *jwt.MapClaims
verifyTokenErr error
userInfoCacheClaims *jwt.MapClaims
expectedStatusCode int
expectedResponseBody *string
}
@@ -258,47 +282,79 @@ func TestSessionManager_WithAuthMiddleware(t *testing.T) {
{
name: "will authenticate successfully",
authDisabled: false,
ssoEnabled: false,
cookieHeader: true,
verifiedClaims: &jwt.RegisteredClaims{},
verifiedClaims: &jwt.MapClaims{},
verifyTokenErr: nil,
userInfoCacheClaims: nil,
expectedStatusCode: http.StatusOK,
expectedResponseBody: strPointer("Ok"),
expectedResponseBody: strPointer("{}"),
},
{
name: "will be noop if auth is disabled",
authDisabled: true,
ssoEnabled: false,
cookieHeader: false,
verifiedClaims: nil,
verifyTokenErr: nil,
userInfoCacheClaims: nil,
expectedStatusCode: http.StatusOK,
expectedResponseBody: strPointer("Ok"),
},
{
name: "will return 400 if no cookie header",
authDisabled: false,
ssoEnabled: false,
cookieHeader: false,
verifiedClaims: &jwt.RegisteredClaims{},
verifiedClaims: &jwt.MapClaims{},
verifyTokenErr: nil,
userInfoCacheClaims: nil,
expectedStatusCode: http.StatusBadRequest,
expectedResponseBody: nil,
},
{
name: "will return 401 verify token fails",
authDisabled: false,
ssoEnabled: false,
cookieHeader: true,
verifiedClaims: &jwt.RegisteredClaims{},
verifiedClaims: &jwt.MapClaims{},
verifyTokenErr: stderrors.New("token error"),
userInfoCacheClaims: nil,
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: nil,
},
{
name: "will return 200 if claims are nil",
authDisabled: false,
ssoEnabled: false,
cookieHeader: true,
verifiedClaims: nil,
verifyTokenErr: nil,
userInfoCacheClaims: nil,
expectedStatusCode: http.StatusOK,
expectedResponseBody: strPointer("Ok"),
expectedResponseBody: strPointer("null"),
},
{
name: "will return 401 if sso is enabled but userinfo response not working",
authDisabled: false,
ssoEnabled: true,
cookieHeader: true,
verifiedClaims: nil,
verifyTokenErr: nil,
userInfoCacheClaims: nil, // indicates that the userinfo response will not work since cache is empty and userinfo endpoint not reachable
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: strPointer("Invalid session"),
},
{
name: "will return 200 if sso is enabled and userinfo response from cache is valid",
authDisabled: false,
ssoEnabled: true,
cookieHeader: true,
verifiedClaims: &jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
verifyTokenErr: nil,
userInfoCacheClaims: &jwt.MapClaims{"sub": "randomUser", "groups": []string{"superusers"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
expectedStatusCode: http.StatusOK,
expectedResponseBody: strPointer("\"groups\":[\"superusers\"]"),
},
}
for _, tc := range cases {
@@ -311,7 +367,47 @@ func TestSessionManager_WithAuthMiddleware(t *testing.T) {
claims: tc.verifiedClaims,
err: tc.verifyTokenErr,
}
ts := httptest.NewServer(WithAuthMiddleware(tc.authDisabled, tm, mux))
clientApp := &oidc.ClientApp{} // all testcases need at least the empty struct for the function to work
if tc.ssoEnabled {
userInfoCache := cache.NewInMemoryCache(24 * time.Hour)
signature, err := util.MakeSignature(32)
require.NoError(t, err, "failed creating signature for settings object")
cdSettings := &settings.ArgoCDSettings{
ServerSignature: signature,
OIDCConfigRAW: `
issuer: http://localhost:63231
enableUserInfoGroups: true
userInfoPath: /`,
}
clientApp, err = oidc.NewClientApp(cdSettings, "", nil, "/argo-cd", userInfoCache)
require.NoError(t, err, "failed creating clientapp")
// prepopulate the cache with claims to return for a userinfo call
encryptionKey, err := cdSettings.GetServerEncryptionKey()
require.NoError(t, err, "failed obtaining encryption key from settings")
// set fake accessToken for GetUserInfo to not return early (can be the same for all cases)
encAccessToken, err := crypto.Encrypt([]byte("123456"), encryptionKey)
require.NoError(t, err, "failed encrypting dummy access token")
err = userInfoCache.Set(&cache.Item{
Key: oidc.FormatAccessTokenCacheKey("randomUser"),
Object: encAccessToken,
})
require.NoError(t, err, "failed setting item to in-memory cache")
// set cacheClaims to in-memory cache to let GetUserInfo return early with this information
if tc.userInfoCacheClaims != nil {
cacheClaims, err := json.Marshal(tc.userInfoCacheClaims)
require.NoError(t, err)
encCacheClaims, err := crypto.Encrypt([]byte(cacheClaims), encryptionKey)
require.NoError(t, err, "failed encrypting cache Claims")
err = userInfoCache.Set(&cache.Item{
Key: oidc.FormatUserInfoResponseCacheKey("randomUser"),
Object: encCacheClaims,
})
require.NoError(t, err, "failed setting item to in-memory cache")
}
}
ts := httptest.NewServer(WithAuthMiddleware(tc.authDisabled, tc.ssoEnabled, clientApp, tm, mux))
defer ts.Close()
req, err := http.NewRequest(http.MethodGet, ts.URL, http.NoBody)
require.NoErrorf(t, err, "error creating request: %s", err)