Compare commits

...

24 Commits

Author SHA1 Message Date
github-actions[bot]
48549a2035 Bump version to 3.2.7 on release-3.2 branch (#26503)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2026-02-18 14:35:48 +02:00
dudinea
10c3fd02f4 fix: Fix excessive ls-remote requests on monorepos with Auto Sync enabled apps (26277) (cherry-pick #26278 for 3.2) (#26502)
Signed-off-by: Eugene Doudine <eugene.doudine@octopus.com>
Co-authored-by: Dan Garfield <dan.garfield@octopus.com>
2026-02-18 14:26:28 +02:00
argo-cd-cherry-pick-bot[bot]
ca08f90e96 fix(server): OIDC config via secrets fails (#18269) (cherry-pick #26214 for 3.2) (#26389)
Signed-off-by: Valéry Fouques <48053275+BZValoche@users.noreply.github.com>
Co-authored-by: Valéry Fouques <48053275+BZValoche@users.noreply.github.com>
2026-02-16 22:08:24 -10:00
argo-cd-cherry-pick-bot[bot]
1f03b27fd5 ci: exclude testdata from sonar.exclusions (cherry-pick #26398 and #26371 for 3.2) (#26424)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
Co-authored-by: Regina Voloshin <regina.voloshin@codefresh.io>
2026-02-12 17:42:27 +02:00
argo-cd-cherry-pick-bot[bot]
9c128e2d4c fix: compressedLayerExtracterStore+isCompressedLayer - allow tar.gzip suffixes (cherry-pick #26355 for 3.2) (#26375)
Signed-off-by: erin liman <erin.liman@tiktokusds.com>
Co-authored-by: erin <6914822+nepeat@users.noreply.github.com>
2026-02-10 10:58:06 -05:00
Nitish Kumar
75eddbd910 chore(deps): update group golang to v1.25.6 (cherry-pick release-3.2) (#26291)
Signed-off-by: nitishfy <justnitish06@gmail.com>
2026-02-06 00:30:10 -10:00
github-actions[bot]
65b029342d Bump version to 3.2.6 on release-3.2 branch (#26120)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: crenshaw-dev <350466+crenshaw-dev@users.noreply.github.com>
2026-01-22 14:34:29 -05:00
Codey Jenkins
2ff406ae33 fix: cherry pick #25516 to release-3.2 (#26115)
Signed-off-by: Codey Jenkins <FourFifthsCode@users.noreply.github.com>
Signed-off-by: pbhatnagar-oss <pbhatifiwork@gmail.com>
Co-authored-by: pbhatnagar-oss <pbhatifiwork@gmail.com>
Co-authored-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2026-01-22 13:34:06 -05:00
John Soutar
76fc92f655 chore(deps): update notifications-engine to fix GitHub PR comments nil panic (cherry-pick #26065 for 3.2) (#26074)
Signed-off-by: John Soutar <john@tella.com>
2026-01-21 09:21:20 +02:00
dudinea
ad117b88a6 fix: invalid error message on health check failure (#26040) (cherry pick #26039 for 3.2) (#26070)
Signed-off-by: Eugene Doudine <eugene.doudine@octopus.com>
2026-01-20 17:34:00 +02:00
argo-cd-cherry-pick-bot[bot]
508da9c791 fix(hydrator): empty links for failed operation (#25025) (cherry-pick #26014 for 3.2) (#26016)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2026-01-15 16:26:37 -05:00
argo-cd-cherry-pick-bot[bot]
20866f4557 fix(hydrator): .gitattributes include deeply nested files (#25870) (cherry-pick #26011 for 3.2) (#26012)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2026-01-15 15:39:26 -05:00
argo-cd-cherry-pick-bot[bot]
e3b108b738 fix: close response body on error paths to prevent connection leak (cherry-pick #25824 for 3.2) (#26006)
Signed-off-by: chentiewen <tiewen.chen@aminer.cn>
Co-authored-by: QingHe <634008786@qq.com>
Co-authored-by: chentiewen <tiewen.chen@aminer.cn>
Co-authored-by: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-15 14:34:59 +01:00
github-actions[bot]
c56f4400f2 Bump version to 3.2.5 on release-3.2 branch (#25982)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2026-01-14 18:10:41 +02:00
Regina Voloshin
e9d03a633e docs: Run make codegen for notifications engine changes (#25958)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
2026-01-13 14:00:42 +02:00
github-actions[bot]
030b4f982b Bump version to 3.2.4 on release-3.2 branch (#25954)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: reggie-k <19544836+reggie-k@users.noreply.github.com>
2026-01-13 10:02:02 +02:00
Regina Voloshin
fafbd44489 feat: Cherry-pick to 3.2 update notifications engine to v0.5.1 0.20251223091026 8c0c96d8d530 (#25930)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
2026-01-12 17:06:36 +05:30
argo-cd-cherry-pick-bot[bot]
d7d9674e33 fix(appset): do not trigger reconciliation on appsets not part of allowed namespaces when updating a cluster secret (cherry-pick #25622 for 3.2) (#25911)
Signed-off-by: OpenGuidou <guillaume.doussin@gmail.com>
Co-authored-by: OpenGuidou <73480729+OpenGuidou@users.noreply.github.com>
2026-01-09 16:17:42 +01:00
argo-cd-cherry-pick-bot[bot]
e6f54030f0 fix: Only show please update resource specification message when spec… (cherry-pick #25066 for 3.2) (#25895)
Signed-off-by: Josh Soref <jsoref@gmail.com>
Co-authored-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
2026-01-07 10:11:25 -05:00
Nitish Kumar
b4146969ed chore(cherry-pick-3.2): bump expr to v1.17.7 (#25889)
Signed-off-by: nitishfy <justnitish06@gmail.com>
2026-01-07 13:31:41 +02:00
argo-cd-cherry-pick-bot[bot]
51c6375130 ci: test against k8s 1.34.2 (cherry-pick #25856 for 3.2) (#25859)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
Co-authored-by: Regina Voloshin <regina.voloshin@codefresh.io>
2026-01-05 18:47:45 +02:00
argo-cd-cherry-pick-bot[bot]
b67eb40a45 docs: link to source hydrator (cherry-pick #25813 for 3.2) (#25814)
Signed-off-by: Josh Soref <jsoref@gmail.com>
Co-authored-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
2026-01-05 10:52:46 +02:00
Nitish Kumar
8a0633b74a chore(deps): bump go to 1.25.5 (cherry-pick) (#25805)
Signed-off-by: nitishfy <justnitish06@gmail.com>
Co-authored-by: Papapetrou Patroklos <1743100+ppapapetrou76@users.noreply.github.com>
2026-01-05 10:36:04 +02:00
argo-cd-cherry-pick-bot[bot]
0d4f505954 test: fix flaky create repository test by resyncing informers (cherry-pick #25706 for 3.2) (#25795)
Signed-off-by: reggie-k <regina.voloshin@codefresh.io>
Co-authored-by: Regina Voloshin <regina.voloshin@codefresh.io>
2025-12-24 17:45:02 +02:00
63 changed files with 1786 additions and 418 deletions

View File

@@ -14,7 +14,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.0'
GOLANG_VERSION: '1.25.6'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -414,14 +414,14 @@ jobs:
# latest: true means that this version mush upload the coverage report to codecov.io
# We designate the latest version because we only collect code coverage for that version.
k3s:
- version: v1.33.1
- version: v1.34.2
latest: true
- version: v1.33.1
latest: false
- version: v1.32.1
latest: false
- version: v1.31.0
latest: false
- version: v1.30.4
latest: false
needs:
- build-go
- changes

View File

@@ -53,7 +53,7 @@ jobs:
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.25.6
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
@@ -70,7 +70,7 @@ jobs:
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.25.6
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:

View File

@@ -11,7 +11,7 @@ permissions: {}
env:
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.0' # Note: go-version must also be set in job argocd-image.with.go-version
GOLANG_VERSION: '1.25.6' # Note: go-version must also be set in job argocd-image.with.go-version
jobs:
argocd-image:
@@ -25,7 +25,7 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.25.6
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:

View File

@@ -19,7 +19,14 @@ jobs:
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # 6.0.1
# Some codegen commands require Go to be setup
- name: Setup Golang
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.6
- name: Self-hosted Renovate
uses: renovatebot/github-action@f8af9272cd94a4637c29f60dea8731afd3134473 #43.0.12

View File

@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS builder
FROM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS builder
WORKDIR /tmp
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd

View File

@@ -1,4 +1,4 @@
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6
FROM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -1 +1 @@
3.2.3
3.2.7

View File

@@ -652,8 +652,9 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
Watches(
&corev1.Secret{},
&clusterSecretEventHandler{
Client: mgr.GetClient(),
Log: log.WithField("type", "createSecretEventHandler"),
Client: mgr.GetClient(),
Log: log.WithField("type", "createSecretEventHandler"),
ApplicationSetNamespaces: r.ApplicationSetNamespaces,
}).
Complete(r)
}

View File

@@ -14,6 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"github.com/argoproj/argo-cd/v3/applicationset/utils"
"github.com/argoproj/argo-cd/v3/common"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)
@@ -22,8 +23,9 @@ import (
// requeue any related ApplicationSets.
type clusterSecretEventHandler struct {
// handler.EnqueueRequestForOwner
Log log.FieldLogger
Client client.Client
Log log.FieldLogger
Client client.Client
ApplicationSetNamespaces []string
}
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
@@ -68,6 +70,10 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Contex
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
for _, appSet := range appSetList.Items {
if !utils.IsNamespaceAllowed(h.ApplicationSetNamespaces, appSet.GetNamespace()) {
// Ignore it as not part of the allowed list of namespaces in which to watch Appsets
continue
}
foundClusterGenerator := false
for _, generator := range appSet.Spec.Generators {
if generator.Clusters != nil {

View File

@@ -137,7 +137,7 @@ func TestClusterEventHandler(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{
Name: "my-app-set",
Namespace: "another-namespace",
Namespace: "argocd",
},
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
@@ -171,9 +171,37 @@ func TestClusterEventHandler(t *testing.T) {
},
},
expectedRequests: []reconcile.Request{
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
},
},
{
name: "cluster generators in other namespaces should not match",
items: []argov1alpha1.ApplicationSet{
{
ObjectMeta: metav1.ObjectMeta{
Name: "my-app-set",
Namespace: "my-namespace-not-allowed",
},
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{
Clusters: &argov1alpha1.ClusterGenerator{},
},
},
},
},
},
secret: corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
},
},
},
expectedRequests: []reconcile.Request{},
},
{
name: "non-argo cd secret should not match",
items: []argov1alpha1.ApplicationSet{
@@ -552,8 +580,9 @@ func TestClusterEventHandler(t *testing.T) {
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
handler := &clusterSecretEventHandler{
Client: fakeClient,
Log: log.WithField("type", "createSecretEventHandler"),
Client: fakeClient,
Log: log.WithField("type", "createSecretEventHandler"),
ApplicationSetNamespaces: []string{"argocd"},
}
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}

View File

@@ -21,8 +21,8 @@ import (
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
const gitAttributesContents = `*/README.md linguist-generated=true
*/hydrator.metadata linguist-generated=true`
const gitAttributesContents = `**/README.md linguist-generated=true
**/hydrator.metadata linguist-generated=true`
func init() {
// Avoid allowing the user to learn things about the environment.

View File

@@ -7,8 +7,10 @@ import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"testing"
"time"
@@ -234,6 +236,74 @@ func TestWriteGitAttributes(t *testing.T) {
gitAttributesPath := filepath.Join(root.Name(), ".gitattributes")
gitAttributesBytes, err := os.ReadFile(gitAttributesPath)
require.NoError(t, err)
assert.Contains(t, string(gitAttributesBytes), "*/README.md linguist-generated=true")
assert.Contains(t, string(gitAttributesBytes), "*/hydrator.metadata linguist-generated=true")
assert.Contains(t, string(gitAttributesBytes), "README.md linguist-generated=true")
assert.Contains(t, string(gitAttributesBytes), "hydrator.metadata linguist-generated=true")
}
func TestWriteGitAttributes_MatchesAllDepths(t *testing.T) {
root := tempRoot(t)
err := writeGitAttributes(root)
require.NoError(t, err)
// The gitattributes pattern needs to match files at all depths:
// - hydrator.metadata (root level)
// - path1/hydrator.metadata (one level deep)
// - path1/nested/deep/hydrator.metadata (multiple levels deep)
// Same for README.md files
//
// The pattern "**/hydrator.metadata" matches at any depth including root
// The pattern "*/hydrator.metadata" only matches exactly one directory level deep
// Test actual Git behavior using git check-attr
// Initialize a git repo
ctx := t.Context()
repoPath := root.Name()
cmd := exec.CommandContext(ctx, "git", "init")
cmd.Dir = repoPath
output, err := cmd.CombinedOutput()
require.NoError(t, err, "Failed to init git repo: %s", string(output))
// Test files at different depths
testCases := []struct {
path string
shouldMatch bool
description string
}{
{"hydrator.metadata", true, "root level hydrator.metadata"},
{"README.md", true, "root level README.md"},
{"path1/hydrator.metadata", true, "one level deep hydrator.metadata"},
{"path1/README.md", true, "one level deep README.md"},
{"path1/nested/hydrator.metadata", true, "two levels deep hydrator.metadata"},
{"path1/nested/README.md", true, "two levels deep README.md"},
{"path1/nested/deep/hydrator.metadata", true, "three levels deep hydrator.metadata"},
{"path1/nested/deep/README.md", true, "three levels deep README.md"},
{"manifest.yaml", false, "manifest.yaml should not match"},
{"path1/manifest.yaml", false, "nested manifest.yaml should not match"},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
// Use git check-attr to verify if linguist-generated attribute is set
cmd := exec.CommandContext(ctx, "git", "check-attr", "linguist-generated", tc.path)
cmd.Dir = repoPath
output, err := cmd.CombinedOutput()
require.NoError(t, err, "Failed to run git check-attr: %s", string(output))
// Output format: <path>: <attribute>: <value>
// Example: "hydrator.metadata: linguist-generated: true"
outputStr := strings.TrimSpace(string(output))
if tc.shouldMatch {
expectedOutput := tc.path + ": linguist-generated: true"
assert.Equal(t, expectedOutput, outputStr,
"File %s should have linguist-generated=true attribute", tc.path)
} else {
// Attribute should be unspecified
expectedOutput := tc.path + ": linguist-generated: unspecified"
assert.Equal(t, expectedOutput, outputStr,
"File %s should not have linguist-generated=true attribute", tc.path)
}
})
}
}

View File

@@ -1508,8 +1508,18 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
// if we just completed an operation, force a refresh so that UI will report up-to-date
// sync/health information
if _, err := cache.MetaNamespaceKeyFunc(app); err == nil {
// force app refresh with using CompareWithLatest comparison type and trigger app reconciliation loop
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatestForceResolve.Pointer(), nil)
var compareWith CompareWith
if state.Operation.InitiatedBy.Automated {
// Do not force revision resolution on automated operations because
// this would cause excessive Ls-Remote requests on monorepo commits
compareWith = CompareWithLatest
} else {
// Force app refresh with using most recent resolved revision after sync,
// so UI won't show a just synced application being out of sync if it was
// synced after commit but before app. refresh (see #18153)
compareWith = CompareWithLatestForceResolve
}
ctrl.requestAppRefresh(app.QualifiedName(), compareWith.Pointer(), nil)
} else {
logCtx.Warnf("Fails to requeue application: %v", err)
}

View File

@@ -2321,6 +2321,41 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
assert.Equal(t, CompareWithLatestForceResolve, level)
}
func TestProcessRequestedAppAutomatedOperation_Successful(t *testing.T) {
app := newFakeApp()
app.Spec.Project = "default"
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
InitiatedBy: v1alpha1.OperationInitiator{
Automated: true,
},
}
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponses: []*apiclient.ManifestResponse{{
Manifests: []string{},
}},
}, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
receivedPatch := map[string]any{}
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
}
return true, &v1alpha1.Application{}, nil
})
ctrl.processRequestedAppOperation(app)
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
assert.Equal(t, "successfully synced (no more tasks)", message)
ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
assert.True(t, ok)
assert.Equal(t, CompareWithLatest, level)
}
func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
testCases := []struct {
name string

View File

@@ -260,7 +260,7 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
Revision: revision,
SyncedRevision: syncedRevision,
NoRevisionCache: noRevisionCache,
Paths: path.GetAppRefreshPaths(app),
Paths: path.GetSourceRefreshPaths(app, source),
AppLabelKey: appLabelKey,
AppName: app.InstanceName(m.namespace),
Namespace: appNamespace,

View File

@@ -11,6 +11,10 @@ The notification service is used to push events to [Alertmanager](https://github
* `basicAuth` - optional, server auth
* `bearerToken` - optional, server auth
* `timeout` - optional, the timeout in seconds used when sending alerts, default is "3 seconds"
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
* `maxConnsPerHost` - optional, maximum total connections per host.
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing.
`basicAuth` or `bearerToken` is used for authentication, you can choose one. If the two are set at the same time, `basicAuth` takes precedence over `bearerToken`.
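
For reference, a minimal `argocd-notifications-cm` sketch showing where the new connection-pool settings would sit in the Alertmanager service block. The target address and numeric values are illustrative assumptions, not values from this diff; the same four optional keys are added to the other HTTP-based notification services touched in this changeset (GitHub, Grafana, Mattermost, NewRelic, Slack, Webhook):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-notifications-cm
data:
  service.alertmanager: |
    targets:
    - alertmanager-operated.monitoring.svc:9093   # illustrative target
    # New optional connection-pool settings; values shown are examples,
    # not recommended defaults.
    maxIdleConns: 100
    maxIdleConnsPerHost: 10
    maxConnsPerHost: 50
    idleConnTimeout: 90s
```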

View File

@@ -12,6 +12,19 @@ The Email notification service sends email notifications using SMTP protocol and
* `html` - optional bool, true or false
* `insecure_skip_verify` - optional bool, true or false
### Using Gmail
When configuring Gmail as the SMTP service:
* `username` - Must be your Gmail address.
* `password` - Use an App Password, not your regular Gmail password.
To Generate an app password, follow this link https://myaccount.google.com/apppasswords
!!! note
This applies to personal Gmail accounts (non-Google Workspace). For Google Workspace users, SMTP settings
and authentication methods may differ.
## Example
The following snippet contains sample Gmail service configuration:
@@ -23,11 +36,11 @@ metadata:
name: argocd-notifications-cm
data:
service.email.gmail: |
username: $email-username
password: $email-password
username: $username
password: $password
host: smtp.gmail.com
port: 465
from: $email-username
from: $email-address
```
Without authentication:
@@ -41,7 +54,7 @@ data:
service.email.example: |
host: smtp.example.com
port: 587
from: $email-username
from: $email-address
```
## Template

View File

@@ -8,6 +8,10 @@ The GitHub notification service changes commit status using [GitHub Apps](https:
- `installationID` - the app installation id
- `privateKey` - the app private key
- `enterpriseBaseURL` - optional URL, e.g. https://git.example.com/api/v3
- `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
- `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
- `maxConnsPerHost` - optional, maximum total connections per host.
- `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing.
> ⚠️ _NOTE:_ Specifying `/api/v3` in the `enterpriseBaseURL` is required until [argoproj/notifications-engine#205](https://github.com/argoproj/notifications-engine/issues/205) is resolved.

View File

@@ -9,6 +9,10 @@ Available parameters :
* `apiURL` - the server url, e.g. https://grafana.example.com
* `apiKey` - the API key for the serviceaccount
* `insecureSkipVerify` - optional bool, true or false
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
* `maxConnsPerHost` - optional, maximum total connections per host.
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing.
1. Login to your Grafana instance as `admin`
2. On the left menu, go to Configuration / API Keys

View File

@@ -5,6 +5,10 @@
* `apiURL` - the server url, e.g. https://mattermost.example.com
* `token` - the bot token
* `insecureSkipVerify` - optional bool, true or false
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
* `maxConnsPerHost` - optional, maximum total connections per host.
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing, e.g. '90s'.
## Configuration

View File

@@ -4,6 +4,10 @@
* `apiURL` - the api server url, e.g. https://api.newrelic.com
* `apiKey` - a [NewRelic ApiKey](https://docs.newrelic.com/docs/apis/rest-api-v2/get-started/introduction-new-relic-rest-api-v2/#api_key)
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
* `maxConnsPerHost` - optional, maximum total connections per host.
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing, e.g. '90s'.
## Configuration

View File

@@ -47,7 +47,8 @@ metadata:
* [Grafana](./grafana.md)
* [Webhook](./webhook.md)
* [Telegram](./telegram.md)
* [Teams](./teams.md)
* [Teams (Office 365 Connectors)](./teams.md) - Legacy service (deprecated, retires March 31, 2026)
* [Teams Workflows](./teams-workflows.md) - Recommended replacement for Office 365 Connectors
* [Google Chat](./googlechat.md)
* [Rocket.Chat](./rocketchat.md)
* [Pushover](./pushover.md)

View File

@@ -62,6 +62,8 @@ The parameters for the PagerDuty configuration in the template generally match w
* `group` - Logical grouping of components of a service.
* `class` - The class/type of the event.
* `url` - The URL that should be used for the link "View in ArgoCD" in PagerDuty.
* `dedupKey` - A string used by PagerDuty to deduplicate and correlate events. Events with the same `dedupKey` will be grouped into the same incident. If omitted, PagerDuty will create a new incident for each event.
The `timestamp` and `custom_details` parameters are not currently supported.
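
A hedged sketch of how `dedupKey` could be used in a notification template so that repeated events for the same application collapse into a single incident. The `pagerdutyv2` block name, the trigger name, and all field values here are assumptions for illustration, not taken from this diff:

```yaml
template.app-sync-failed: |
  # "pagerdutyv2" is an assumed service/block name; match it to the
  # PagerDuty service key configured in argocd-notifications-cm.
  pagerdutyv2:
    summary: "Sync failed for {{.app.metadata.name}}"
    severity: "error"
    source: "{{.app.metadata.name}}"
    group: "argocd"
    class: "sync"
    url: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
    # Events sharing this key are correlated into the same incident.
    dedupKey: "{{.app.metadata.name}}-sync-failed"
```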

View File

@@ -16,6 +16,11 @@ The Slack notification service configuration includes following settings:
| `token` | **True** | `string` | The app's OAuth access token. | `xoxb-1234567890-1234567890123-5n38u5ed63fgzqlvuyxvxcx6` |
| `username` | False | `string` | The app username. | `argocd` |
| `disableUnfurl` | False | `bool` | Disable slack unfurling links in messages | `true` |
| `maxIdleConns` | False | `int` | Maximum number of idle (keep-alive) connections across all hosts. | — |
| `maxIdleConnsPerHost` | False | `int` | Maximum number of idle (keep-alive) connections per host. | — |
| `maxConnsPerHost` | False | `int` | Maximum total connections per host. | — |
| `idleConnTimeout` | False | `string` | Maximum amount of time an idle (keep-alive) connection will remain open before closing (e.g., `90s`). | — |
## Configuration

View File

@@ -0,0 +1,370 @@
# Teams Workflows
## Overview
The Teams Workflows notification service sends message notifications using Microsoft Teams Workflows (Power Automate). This is the recommended replacement for the legacy Office 365 Connectors service, which will be retired on March 31, 2026.
## Parameters
The Teams Workflows notification service requires specifying the following settings:
* `recipientUrls` - the webhook url map, e.g. `channelName: https://api.powerautomate.com/webhook/...`
## Supported Webhook URL Formats
The service supports the following Microsoft Teams Workflows webhook URL patterns:
- `https://api.powerautomate.com/...`
- `https://api.powerplatform.com/...`
- `https://flow.microsoft.com/...`
- URLs containing `/powerautomate/` in the path
## Configuration
1. Open `Teams` and go to the channel you wish to set notifications for
2. Click on the 3 dots next to the channel name
3. Select`Workflows`
4. Click on `Manage`
5. Click `New flow`
6. Write `Send webhook alerts to a channel` in the search bar or select it from the template list
7. Choose your team and channel
8. Configure the webhook name and settings
9. Copy the webhook URL (it will be from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`)
10. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-notifications-cm
data:
service.teams-workflows: |
recipientUrls:
channelName: $channel-workflows-url
```
```yaml
apiVersion: v1
kind: Secret
metadata:
name: <secret-name>
stringData:
channel-workflows-url: https://api.powerautomate.com/webhook/your-webhook-id
```
11. Create subscription for your Teams Workflows integration:
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
annotations:
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
```
## Channel Support
- ✅ Standard Teams channels
- ✅ Shared channels (as of December 2025)
- ✅ Private channels (as of December 2025)
Teams Workflows provides enhanced channel support compared to Office 365 Connectors, allowing you to post to shared and private channels in addition to standard channels.
## Adaptive Card Format
The Teams Workflows service uses **Adaptive Cards** exclusively, which is the modern, flexible card format for Microsoft Teams. All notifications are automatically converted to Adaptive Card format and wrapped in the required message envelope.
### Option 1: Using Template Fields (Recommended)
The service automatically converts template fields to Adaptive Card format. This is the simplest and most maintainable approach:
```yaml
template.app-sync-succeeded: |
teams-workflows:
# ThemeColor supports Adaptive Card semantic colors: "Good", "Warning", "Attention", "Accent"
# or hex colors like "#000080"
themeColor: "Good"
title: Application {{.app.metadata.name}} has been successfully synced
text: Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.
summary: "{{.app.metadata.name}} sync succeeded"
facts: |
[{
"name": "Sync Status",
"value": "{{.app.status.sync.status}}"
}, {
"name": "Repository",
"value": "{{.app.spec.source.repoURL}}"
}]
sections: |
[{
"facts": [
{
"name": "Namespace",
"value": "{{.app.metadata.namespace}}"
},
{
"name": "Cluster",
"value": "{{.app.spec.destination.server}}"
}
]
}]
potentialAction: |-
[{
"@type": "OpenUri",
"name": "View in Argo CD",
"targets": [{
"os": "default",
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
}]
}]
```
**How it works:**
- `title` → Converted to a large, bold TextBlock
- `text` → Converted to a regular TextBlock
- `facts` → Converted to a FactSet element
- `sections` → Facts within sections are extracted and converted to FactSet elements
- `potentialAction` → OpenUri actions are converted to Action.OpenUrl
- `themeColor` → Applied to the title TextBlock (supports semantic colors like "Good", "Warning", "Attention", "Accent" or hex colors)
### Option 2: Custom Adaptive Card JSON
For full control and advanced features, you can provide a complete Adaptive Card JSON template:
```yaml
template.app-sync-succeeded: |
teams-workflows:
adaptiveCard: |
{
"type": "AdaptiveCard",
"version": "1.4",
"body": [
{
"type": "TextBlock",
"text": "Application {{.app.metadata.name}} synced successfully",
"size": "Large",
"weight": "Bolder",
"color": "Good"
},
{
"type": "TextBlock",
"text": "Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.",
"wrap": true
},
{
"type": "FactSet",
"facts": [
{
"title": "Sync Status",
"value": "{{.app.status.sync.status}}"
},
{
"title": "Repository",
"value": "{{.app.spec.source.repoURL}}"
}
]
}
],
"actions": [
{
"type": "Action.OpenUrl",
"title": "View in Argo CD",
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
}
]
}
```
**Note:** When using `adaptiveCard`, you only need to provide the AdaptiveCard JSON structure (not the full message envelope). The service automatically wraps it in the required `message` + `attachments` format for Teams Workflows.
**Important:** If you provide `adaptiveCard`, it takes precedence over all other template fields (`title`, `text`, `facts`, etc.).
## Template Fields
The Teams Workflows service supports the following template fields, which are automatically converted to Adaptive Card format:
### Standard Fields
- `title` - Message title (converted to large, bold TextBlock)
- `text` - Message text content (converted to TextBlock)
- `summary` - Summary text (currently not used in Adaptive Cards, but preserved for compatibility)
- `themeColor` - Color for the title. Supports:
- Semantic colors: `"Good"` (green), `"Warning"` (yellow), `"Attention"` (red), `"Accent"` (blue)
- Hex colors: `"#000080"`, `"#FF0000"`, etc.
- `facts` - JSON array of fact key-value pairs (converted to FactSet)
```yaml
facts: |
[{
"name": "Status",
"value": "{{.app.status.sync.status}}"
}]
```
- `sections` - JSON array of sections containing facts (facts are extracted and converted to FactSet)
```yaml
sections: |
[{
"facts": [{
"name": "Namespace",
"value": "{{.app.metadata.namespace}}"
}]
}]
```
- `potentialAction` - JSON array of action buttons (OpenUri actions converted to Action.OpenUrl)
```yaml
potentialAction: |-
[{
"@type": "OpenUri",
"name": "View Details",
"targets": [{
"os": "default",
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
}]
}]
```
### Advanced Fields
- `adaptiveCard` - Complete Adaptive Card JSON template (takes precedence over all other fields)
- Only provide the AdaptiveCard structure, not the message envelope
- Supports full Adaptive Card 1.4 specification
- Allows access to all Adaptive Card features (containers, columns, images, etc.)
- `template` - Raw JSON template (legacy, use `adaptiveCard` instead)
### Field Conversion Details
| Template Field | Adaptive Card Element | Notes |
|---------------|----------------------|-------|
| `title` | `TextBlock` with `size: "Large"`, `weight: "Bolder"` | ThemeColor applied to this element |
| `text` | `TextBlock` with `wrap: true` | Uses `n.Message` if `text` is empty |
| `facts` | `FactSet` | Each fact becomes a `title`/`value` pair |
| `sections[].facts` | `FactSet` | Facts extracted from sections |
| `potentialAction[OpenUri]` | `Action.OpenUrl` | Only OpenUri actions are converted |
| `themeColor` | Applied to title `TextBlock.color` | Supports semantic and hex colors |
## Migration from Office 365 Connectors
If you're currently using the `teams` service with Office 365 Connectors, follow these steps to migrate:
1. **Create a new Workflows webhook** using the configuration steps above
2. **Update your service configuration:**
- Change from `service.teams` to `service.teams-workflows`
- Update the webhook URL to your new Workflows webhook URL
3. **Update your templates:**
- Change `teams:` to `teams-workflows:` in your templates
- Your existing template fields (`title`, `text`, `facts`, `sections`, `potentialAction`) will automatically be converted to Adaptive Card format
- No changes needed to your template structure - the conversion is automatic
4. **Update your subscriptions:**
```yaml
# Old
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
# New
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
```
5. **Test and verify:**
- Send a test notification to verify it works correctly
- Once verified, you can remove the old Office 365 Connector configuration
**Note:** Your existing templates will work without modification. The service automatically converts your template fields to Adaptive Card format, so you get the benefits of modern cards without changing your templates.
## Differences from Office 365 Connectors
| Feature | Office 365 Connectors | Teams Workflows |
|---------|----------------------|-----------------|
| Service Name | `teams` | `teams-workflows` |
| Standard Channels | ✅ | ✅ |
| Shared Channels | ❌ | ✅ (Dec 2025+) |
| Private Channels | ❌ | ✅ (Dec 2025+) |
| Card Format | messageCard (legacy) | Adaptive Cards (modern) |
| Template Conversion | N/A | Automatic conversion from template fields |
| Retirement Date | March 31, 2026 | Active |
## Adaptive Card Features
The Teams Workflows service leverages Adaptive Cards, which provide:
- **Rich Content**: Support for text, images, fact sets, and more
- **Flexible Layout**: Containers, columns, and adaptive layouts
- **Interactive Elements**: Action buttons, input fields, and more
- **Semantic Colors**: Built-in color schemes (Good, Warning, Attention, Accent)
- **Cross-Platform**: Works across Teams, Outlook, and other Microsoft 365 apps
### Example: Advanced Adaptive Card Template
For complex notifications, you can use the full Adaptive Card specification:
```yaml
template.app-sync-succeeded-advanced: |
teams-workflows:
adaptiveCard: |
{
"type": "AdaptiveCard",
"version": "1.4",
"body": [
{
"type": "Container",
"items": [
{
"type": "ColumnSet",
"columns": [
{
"type": "Column",
"width": "auto",
"items": [
{
"type": "Image",
"url": "https://example.com/success-icon.png",
"size": "Small"
}
]
},
{
"type": "Column",
"width": "stretch",
"items": [
{
"type": "TextBlock",
"text": "Application {{.app.metadata.name}}",
"weight": "Bolder",
"size": "Large"
},
{
"type": "TextBlock",
"text": "Successfully synced",
"spacing": "None",
"isSubtle": true
}
]
}
]
},
{
"type": "FactSet",
"facts": [
{
"title": "Status",
"value": "{{.app.status.sync.status}}"
},
{
"title": "Repository",
"value": "{{.app.spec.source.repoURL}}"
}
]
}
]
}
],
"actions": [
{
"type": "Action.OpenUrl",
"title": "View in Argo CD",
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
}
]
}
```

View File

@@ -1,18 +1,46 @@
# Teams
# Teams (Office 365 Connectors)
## ⚠️ Deprecation Notice
**Office 365 Connectors are being retired by Microsoft.**
Microsoft is retiring the Office 365 Connectors service in Teams. The service will be fully retired by **March 31, 2026** (extended from the original timeline of December 2025).
### What this means:
- **Old Office 365 Connectors** (webhook URLs from `webhook.office.com`) will stop working after the retirement date
- **New Power Automate Workflows** (webhook URLs from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`) are the recommended replacement
### Migration Required:
If you are currently using Office 365 Connectors (Incoming Webhook), you should migrate to Power Automate Workflows before the retirement date. The notifications-engine automatically detects the webhook type and handles both formats, but you should plan your migration.
**Migration Resources:**
- [Microsoft Deprecation Notice](https://devblogs.microsoft.com/microsoft365dev/retirement-of-office-365-connectors-within-microsoft-teams/)
- [Create incoming webhooks with Workflows for Microsoft Teams](https://support.microsoft.com/en-us/office/create-incoming-webhooks-with-workflows-for-microsoft-teams-4b3b0b0e-0b5a-4b5a-9b5a-0b5a-4b5a-9b5a)
---
## Parameters
The Teams notification service send message notifications using Teams bot and requires specifying the following settings:
The Teams notification service sends message notifications using Office 365 Connectors and requires specifying the following settings:
* `recipientUrls` - the webhook url map, e.g. `channelName: https://example.com`
* `recipientUrls` - the webhook url map, e.g. `channelName: https://outlook.office.com/webhook/...`
> **⚠️ Deprecation Notice:** Office 365 Connectors will be retired by Microsoft on **March 31, 2026**. We recommend migrating to the [Teams Workflows service](./teams-workflows.md) for continued support and enhanced features.
## Configuration
> **💡 For Power Automate Workflows (Recommended):** See the [Teams Workflows documentation](./teams-workflows.md) for detailed configuration instructions.
### Office 365 Connectors (Deprecated - Retiring March 31, 2026)
> **⚠️ Warning:** This method is deprecated and will stop working after March 31, 2026. Please migrate to Power Automate Workflows.
1. Open `Teams` and goto `Apps`
2. Find `Incoming Webhook` microsoft app and click on it
3. Press `Add to a team` -> select team and channel -> press `Set up a connector`
4. Enter webhook name and upload image (optional)
5. Press `Create` then copy webhook url and store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
5. Press `Create` then copy webhook url (it will be from `webhook.office.com`)
6. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
```yaml
apiVersion: v1
@@ -31,10 +59,20 @@ kind: Secret
metadata:
name: <secret-name>
stringData:
channel-teams-url: https://example.com
channel-teams-url: https://webhook.office.com/webhook/your-webhook-id # Office 365 Connector (deprecated)
```
6. Create subscription for your Teams integration:
> **Note:** For Power Automate Workflows webhooks, use the [Teams Workflows service](./teams-workflows.md) instead.
### Webhook Type Detection
The `teams` service supports Office 365 Connectors (deprecated):
- **Office 365 Connectors**: URLs from `webhook.office.com` (deprecated)
- Requires response body to be exactly `"1"` for success
- Will stop working after March 31, 2026
7. Create subscription for your Teams integration:
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -44,12 +82,20 @@ metadata:
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
```
## Channel Support
- ✅ Standard Teams channels only
> **Note:** Office 365 Connectors only support standard Teams channels. For shared channels or private channels, use the [Teams Workflows service](./teams-workflows.md).
## Templates
![](https://user-images.githubusercontent.com/18019529/114271500-9d2b8880-9a4c-11eb-85c1-f6935f0431d5.png)
[Notification templates](../templates.md) can be customized to leverage teams message sections, facts, themeColor, summary and potentialAction [feature](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using).
The Teams service uses the **messageCard** format (MessageCard schema) which is compatible with Office 365 Connectors.
```yaml
template.app-sync-succeeded: |
teams:
@@ -124,3 +170,7 @@ template.app-sync-succeeded: |
teams:
summary: "Sync Succeeded"
```
## Migration to Teams Workflows
If you're currently using Office 365 Connectors, see the [Teams Workflows documentation](./teams-workflows.md) for migration instructions and enhanced features.

View File

@@ -14,6 +14,10 @@ The Webhook notification service configuration includes following settings:
- `retryWaitMin` - Optional, the minimum wait time between retries. Default value: 1s.
- `retryWaitMax` - Optional, the maximum wait time between retries. Default value: 5s.
- `retryMax` - Optional, the maximum number of retries. Default value: 3.
- `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
- `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
- `maxConnsPerHost` - optional, maximum total connections per host.
- `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing, e.g. '90s'.
## Retry Behavior
@@ -74,6 +78,29 @@ metadata:
notifications.argoproj.io/subscribe.<trigger-name>.<webhook-name>: ""
```
4. TLS configuration (optional)
If your webhook server uses a custom TLS certificate, you can configure the notification service to trust it by adding the certificate to the `argocd-tls-certs-cm` ConfigMap as shown below:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-tls-certs-cm
data:
<hostname>: |
-----BEGIN CERTIFICATE-----
<TLS DATA>
-----END CERTIFICATE-----
```
*NOTE:*
*If the custom certificate is not trusted, you may encounter errors such as:*
```
Put \"https://...\": x509: certificate signed by unknown authority
```
*Adding the server's certificate to `argocd-tls-certs-cm` resolves this issue.*
## Examples
### Set GitHub commit status

View File

@@ -1,5 +1,5 @@
| Argo CD version | Kubernetes versions |
|-----------------|---------------------|
| 3.2 | v1.33, v1.32, v1.31, v1.30 |
| 3.1 | v1.33, v1.32, v1.31, v1.30 |
| 3.2 | v1.34, v1.33, v1.32, v1.31 |
| 3.1 | v1.34, v1.33, v1.32, v1.31 |
| 3.0 | v1.32, v1.31, v1.30, v1.29 |

View File

@@ -4,7 +4,7 @@
### Hydration paths must now be non-root
Source hydration now requires that every application specify a non-root path.
Source hydration (with [Source Hydrator](../../../user-guide/source-hydrator/)) now requires that every application specify a non-root path.
Using the repository root (for example, "" or ".") is no longer supported. This change ensures
that hydration outputs are isolated to a dedicated subdirectory and prevents accidental overwrites
or deletions of important files stored at the root, such as CI pipelines, documentation, or configuration files.

go.mod
View File

@@ -1,6 +1,6 @@
module github.com/argoproj/argo-cd/v3
go 1.25.0
go 1.25.5
require (
code.gitea.io/sdk/gitea v0.22.0
@@ -13,7 +13,7 @@ require (
github.com/TomOnTime/utfutil v1.0.0
github.com/alicebob/miniredis/v2 v2.35.0
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630
github.com/argoproj/pkg v0.13.6
github.com/argoproj/pkg/v2 v2.0.1
github.com/aws/aws-sdk-go v1.55.7
@@ -29,7 +29,7 @@ require (
github.com/dlclark/regexp2 v1.11.5
github.com/dustin/go-humanize v1.0.1
github.com/evanphx/json-patch v5.9.11+incompatible
github.com/expr-lang/expr v1.17.6
github.com/expr-lang/expr v1.17.7
github.com/felixge/httpsnoop v1.0.4
github.com/fsnotify/fsnotify v1.9.0
github.com/gfleury/go-bitbucket-v1 v0.0.0-20240917142304-df385efaac68

go.sum
View File

@@ -115,8 +115,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d h1:iUJYrbSvpV9n8vyl1sBt1GceM60HhHfnHxuzcm5apDg=
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d/go.mod h1:PauXVUVcfiTgC+34lDdWzPS101g4NpsUtDAjFBnWf94=
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff h1:pGGAeHIktPuYCRl1Z540XdxPFnedqyUhJK4VgpyJZfY=
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630 h1:naE5KNRTOALjF5nVIGUHrHU5xjlB8QJJiCu+aISIlSs=
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8=
github.com/argoproj/pkg/v2 v2.0.1 h1:O/gCETzB/3+/hyFL/7d/VM/6pSOIRWIiBOTb2xqAHvc=
@@ -257,8 +257,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/expr-lang/expr v1.17.6 h1:1h6i8ONk9cexhDmowO/A64VPxHScu7qfSl2k8OlINec=
github.com/expr-lang/expr v1.17.6/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=

View File

@@ -1,6 +1,6 @@
module github.com/argoproj/argo-cd/get-previous-release
go 1.23.5
go 1.25.5
require (
github.com/stretchr/testify v1.9.0

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.3
newTag: v3.2.7

View File

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.3
newTag: v3.2.7
resources:
- ./application-controller
- ./dex

View File

@@ -24850,7 +24850,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -24985,7 +24985,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25113,7 +25113,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25410,7 +25410,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25462,7 +25462,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25810,7 +25810,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -24818,7 +24818,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -24947,7 +24947,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25244,7 +25244,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25296,7 +25296,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -25644,7 +25644,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.3
newTag: v3.2.7

View File

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.2.3
newTag: v3.2.7
resources:
- ../../base/application-controller
- ../../base/applicationset-controller

View File

@@ -26216,7 +26216,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -26351,7 +26351,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26502,7 +26502,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -26598,7 +26598,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -26722,7 +26722,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -27045,7 +27045,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -27097,7 +27097,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -27471,7 +27471,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -27855,7 +27855,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -26186,7 +26186,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -26338,7 +26338,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -26434,7 +26434,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -26558,7 +26558,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -26881,7 +26881,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26933,7 +26933,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -27307,7 +27307,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -27691,7 +27691,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1897,7 +1897,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2032,7 +2032,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2183,7 +2183,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2279,7 +2279,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2403,7 +2403,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2726,7 +2726,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2778,7 +2778,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3152,7 +3152,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3536,7 +3536,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -1867,7 +1867,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2019,7 +2019,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2115,7 +2115,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2239,7 +2239,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2562,7 +2562,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2614,7 +2614,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2988,7 +2988,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3372,7 +3372,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -25294,7 +25294,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -25429,7 +25429,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25580,7 +25580,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -25676,7 +25676,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -25778,7 +25778,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -26075,7 +26075,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -26127,7 +26127,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26499,7 +26499,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -26883,7 +26883,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

manifests/install.yaml generated (16 changed lines)
View File

@@ -25262,7 +25262,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -25414,7 +25414,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -25510,7 +25510,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -25612,7 +25612,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -25909,7 +25909,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -25961,7 +25961,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -26333,7 +26333,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -26717,7 +26717,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -975,7 +975,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1110,7 +1110,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1261,7 +1261,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1357,7 +1357,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1459,7 +1459,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1756,7 +1756,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1808,7 +1808,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2180,7 +2180,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2564,7 +2564,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -943,7 +943,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -1095,7 +1095,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -1191,7 +1191,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -1293,7 +1293,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -1590,7 +1590,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -1642,7 +1642,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /var/run/argocd/argocd-cmp-server
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -2014,7 +2014,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -2398,7 +2398,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.2.3
image: quay.io/argoproj/argocd:v3.2.7
imagePullPolicy: Always
name: argocd-application-controller
ports:

View File

@@ -92,6 +92,7 @@ nav:
- operator-manual/notifications/services/pushover.md
- operator-manual/notifications/services/rocketchat.md
- operator-manual/notifications/services/slack.md
- operator-manual/notifications/services/teams-workflows.md
- operator-manual/notifications/services/teams.md
- operator-manual/notifications/services/telegram.md
- operator-manual/notifications/services/webex.md

View File

@@ -1,10 +1,20 @@
package apiclient
import (
"context"
"io"
"net/http"
"net/http/httptest"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
func Test_parseHeaders(t *testing.T) {
@@ -39,3 +49,234 @@ func Test_parseGRPCHeaders(t *testing.T) {
assert.ErrorContains(t, err, "additional headers must be colon(:)-separated: foo")
})
}
func TestExecuteRequest_ClosesBodyOnHTTPError(t *testing.T) {
bodyClosed := &atomic.Bool{}
// Create a test server that returns HTTP 500 error
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}))
defer server.Close()
// Create client with custom httpClient that tracks body closure
originalTransport := http.DefaultTransport
customTransport := &testTransport{
base: originalTransport,
bodyClosed: bodyClosed,
}
c := &client{
ServerAddr: server.URL[7:], // Remove "http://"
PlainText: true,
httpClient: &http.Client{
Transport: customTransport,
},
GRPCWebRootPath: "",
}
// Execute request that should fail with HTTP 500
ctx := context.Background()
md := metadata.New(map[string]string{})
_, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
// Verify error was returned
require.Error(t, err)
assert.Contains(t, err.Error(), "failed with status code 500")
// Give a small delay to ensure Close() was called
time.Sleep(10 * time.Millisecond)
// Verify body was closed to prevent connection leak
assert.True(t, bodyClosed.Load(), "response body should be closed on HTTP error to prevent connection leak")
}
func TestExecuteRequest_ClosesBodyOnGRPCError(t *testing.T) {
bodyClosed := &atomic.Bool{}
// Create a test server that returns HTTP 200 but with gRPC error status
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Grpc-Status", "3") // codes.InvalidArgument
w.Header().Set("Grpc-Message", "invalid argument")
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
// Create client with custom httpClient that tracks body closure
originalTransport := http.DefaultTransport
customTransport := &testTransport{
base: originalTransport,
bodyClosed: bodyClosed,
}
c := &client{
ServerAddr: server.URL[7:], // Remove "http://"
PlainText: true,
httpClient: &http.Client{
Transport: customTransport,
},
GRPCWebRootPath: "",
}
// Execute request that should fail with gRPC error
ctx := context.Background()
md := metadata.New(map[string]string{})
_, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
// Verify gRPC error was returned
require.Error(t, err)
assert.Contains(t, err.Error(), "invalid argument")
// Give a small delay to ensure Close() was called
time.Sleep(10 * time.Millisecond)
// Verify body was closed to prevent connection leak
assert.True(t, bodyClosed.Load(), "response body should be closed on gRPC error to prevent connection leak")
}
func TestExecuteRequest_ConcurrentErrorRequests_NoConnectionLeak(t *testing.T) {
// This test simulates the scenario from the test script:
// Multiple concurrent requests that fail should all close their response bodies
var totalRequests atomic.Int32
var closedBodies atomic.Int32
// Create a test server that always returns errors
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
totalRequests.Add(1)
// Alternate between HTTP errors and gRPC errors
if totalRequests.Load()%2 == 0 {
w.WriteHeader(http.StatusBadRequest)
} else {
w.Header().Set("Grpc-Status", strconv.Itoa(int(codes.PermissionDenied)))
w.Header().Set("Grpc-Message", "permission denied")
w.WriteHeader(http.StatusOK)
}
}))
defer server.Close()
// Create client with custom transport that tracks closures
customTransport := &testTransport{
base: http.DefaultTransport,
bodyClosed: &atomic.Bool{},
onClose: func() {
closedBodies.Add(1)
},
}
c := &client{
ServerAddr: server.URL[7:],
PlainText: true,
httpClient: &http.Client{
Transport: customTransport,
},
GRPCWebRootPath: "",
}
// Simulate concurrent requests like in the test script
concurrency := 10
iterations := 5
var wg sync.WaitGroup
for iter := 0; iter < iterations; iter++ {
for i := 0; i < concurrency; i++ {
wg.Add(1)
go func() {
defer wg.Done()
ctx := context.Background()
md := metadata.New(map[string]string{})
_, err := c.executeRequest(ctx, "/application.ApplicationService/ManagedResources", []byte("test"), md)
// We expect errors
assert.Error(t, err)
}()
}
wg.Wait()
}
// Give time for all Close() calls to complete
time.Sleep(100 * time.Millisecond)
// Verify all response bodies were closed
expectedTotal := int32(concurrency * iterations)
assert.Equal(t, expectedTotal, totalRequests.Load(), "all requests should have been made")
assert.Equal(t, expectedTotal, closedBodies.Load(), "all response bodies should be closed to prevent connection leaks")
}
func TestExecuteRequest_SuccessDoesNotCloseBodyPrematurely(t *testing.T) {
// Verify that successful requests do NOT close the body in executeRequest
// (caller is responsible for closing in success case)
bodyClosed := &atomic.Bool{}
// Create a test server that returns success
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Grpc-Status", "0") // codes.OK
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
customTransport := &testTransport{
base: http.DefaultTransport,
bodyClosed: bodyClosed,
}
c := &client{
ServerAddr: server.URL[7:],
PlainText: true,
httpClient: &http.Client{
Transport: customTransport,
},
GRPCWebRootPath: "",
}
// Execute successful request
ctx := context.Background()
md := metadata.New(map[string]string{})
resp, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
// Verify success
require.NoError(t, err)
require.NotNil(t, resp)
defer resp.Body.Close()
// Verify body was NOT closed by executeRequest (caller's responsibility)
time.Sleep(10 * time.Millisecond)
assert.False(t, bodyClosed.Load(), "response body should NOT be closed by executeRequest on success - caller is responsible")
}
// testTransport wraps http.RoundTripper to track body closures
type testTransport struct {
base http.RoundTripper
bodyClosed *atomic.Bool
onClose func() // Optional callback for each close
}
func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) {
resp, err := t.base.RoundTrip(req)
if err != nil {
return nil, err
}
// Wrap the response body to track Close() calls
resp.Body = &closeTracker{
ReadCloser: resp.Body,
closed: t.bodyClosed,
onClose: t.onClose,
}
return resp, nil
}
type closeTracker struct {
io.ReadCloser
closed *atomic.Bool
onClose func()
}
func (c *closeTracker) Close() error {
c.closed.Store(true)
if c.onClose != nil {
c.onClose()
}
return c.ReadCloser.Close()
}

View File

@@ -86,6 +86,9 @@ func (c *client) executeRequest(ctx context.Context, fullMethodName string, msg
return nil, err
}
if resp.StatusCode != http.StatusOK {
if resp.Body != nil {
utilio.Close(resp.Body)
}
return nil, fmt.Errorf("%s %s failed with status code %d", req.Method, req.URL, resp.StatusCode)
}
var code codes.Code
@@ -97,6 +100,9 @@ func (c *client) executeRequest(ctx context.Context, fullMethodName string, msg
code = codes.Code(statusInt)
}
if code != codes.OK {
if resp.Body != nil {
utilio.Close(resp.Body)
}
return nil, status.Error(code, resp.Header.Get("Grpc-Message"))
}
}
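For reference, a minimal standalone sketch (plain net/http, not the Argo CD client itself) of the close-on-error-path pattern this change applies: every error path closes the response body before returning, and only the success path hands an open body to the caller.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// doRequest is illustrative only; it mirrors the pattern used by executeRequest above.
// On every error path the response body is closed so the underlying connection is
// released; on success the caller owns (and must close) the body.
func doRequest(url string) (*http.Response, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		resp.Body.Close() // error path: release the connection here
		return nil, fmt.Errorf("GET %s failed with status code %d", url, resp.StatusCode)
	}
	return resp, nil // success path: caller is responsible for closing
}

func main() {
	resp, err := doRequest("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	_, _ = io.Copy(io.Discard, resp.Body)
}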

View File

@@ -8,7 +8,8 @@ sonar.projectVersion=1.0
# Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
sonar.sources=.
sonar.exclusions=docs/**
# Exclude docs and testdata with kustomization files (Sonar IaC parser fails on empty/edge-case YAML)
sonar.exclusions=docs/**,**/testdata/**
# Encoding of the source code. Default is default system encoding
sonar.sourceEncoding=UTF-8
@@ -24,5 +25,5 @@ sonar.go.exclusions=**/vendor/**,**/*.pb.go,**/*_test.go,**/*.pb.gw.go,**/mocks/
# Exclude following set of patterns from duplication detection
sonar.cpd.exclusions=**/*.pb.go,**/*.g.cs,**/*.gw.go,**/mocks/*,docs/**
# Exclude test manifests from analysis
sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**
# Exclude test manifests from analysis (avoids Sonar IaC parser errors on empty/edge-case kustomization files)
sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**,util/app/discovery/testdata/**,reposerver/repository/testdata/**

View File

@@ -37,15 +37,17 @@ export const ApplicationHydrateOperationState: React.FunctionComponent<Props> =
if (hydrateOperationState.finishedAt && hydrateOperationState.phase !== 'Hydrating') {
operationAttributes.push({title: 'FINISHED AT', value: <Timestamp date={hydrateOperationState.finishedAt} />});
}
operationAttributes.push({
title: 'DRY REVISION',
value: (
<div>
<Revision repoUrl={hydrateOperationState.sourceHydrator.drySource.repoURL} revision={hydrateOperationState.drySHA} />
</div>
)
});
if (hydrateOperationState.finishedAt) {
if (hydrateOperationState.drySHA) {
operationAttributes.push({
title: 'DRY REVISION',
value: (
<div>
<Revision repoUrl={hydrateOperationState.sourceHydrator.drySource.repoURL} revision={hydrateOperationState.drySHA} />
</div>
)
});
}
if (hydrateOperationState.finishedAt && hydrateOperationState.hydratedSHA) {
operationAttributes.push({
title: 'HYDRATED REVISION',
value: (

View File

@@ -272,13 +272,15 @@ export const ApplicationNodeInfo = (props: {
Resource not found in cluster:{' '}
{`${props?.controlled?.state?.targetState?.apiVersion}/${props?.controlled?.state?.targetState?.kind}:${props.node.name}`}
<br />
{props?.controlled?.state?.normalizedLiveState?.apiVersion && (
<span>
Please update your resource specification to use the latest Kubernetes API resources supported by the target cluster. The
recommended syntax is{' '}
{`${props.controlled.state.normalizedLiveState.apiVersion}/${props?.controlled.state.normalizedLiveState?.kind}:${props.node.name}`}
</span>
)}
{props?.controlled?.state?.normalizedLiveState?.apiVersion &&
`${props?.controlled?.state?.targetState?.apiVersion}/${props?.controlled?.state?.targetState?.kind}:${props.node.name}` !==
`${props.controlled.state.normalizedLiveState.apiVersion}/${props?.controlled.state.normalizedLiveState?.kind}:${props.node.name}` && (
<span>
Please update your resource specification to use the latest Kubernetes API resources supported by the target cluster. The
recommended syntax is{' '}
{`${props.controlled.state.normalizedLiveState.apiVersion}/${props?.controlled.state.normalizedLiveState?.kind}:${props.node.name}`}
</span>
)}
</div>
)}
</React.Fragment>

View File

@@ -511,7 +511,9 @@ export interface HydrateOperation {
finishedAt?: models.Time;
phase: HydrateOperationPhase;
message: string;
// drySHA is the sha of the DRY commit being hydrated. This will be empty if the operation is not successful.
drySHA: string;
// hydratedSHA is the sha of the hydrated commit. This will be empty if the operation is not successful.
hydratedSHA: string;
sourceHydrator: SourceHydrator;
}

View File

@@ -97,23 +97,41 @@ func CheckOutOfBoundsSymlinks(basePath string) error {
})
}
// GetAppRefreshPaths returns the list of paths that should trigger a refresh for an application
func GetAppRefreshPaths(app *v1alpha1.Application) []string {
// GetSourceRefreshPaths returns the list of paths that should trigger a refresh for an application.
// The source parameter influences the returned refresh paths:
// - if source hydrator configured AND source is syncSource: use sync source path (ignores annotation)
// - if source hydrator configured AND source is drySource WITH annotation: use annotation paths with drySource base
// - if source hydrator not configured: use annotation paths with source base, or empty if no annotation
func GetSourceRefreshPaths(app *v1alpha1.Application, source v1alpha1.ApplicationSource) []string {
annotationPaths, hasAnnotation := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
if app.Spec.SourceHydrator != nil {
syncSource := app.Spec.SourceHydrator.GetSyncSource()
// if source is syncSource use the source path
if (source).Equals(&syncSource) {
return []string{source.Path}
}
}
var paths []string
if val, ok := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]; ok && val != "" {
for _, item := range strings.Split(val, ";") {
if hasAnnotation && annotationPaths != "" {
for _, item := range strings.Split(annotationPaths, ";") {
// skip empty paths
if item == "" {
continue
}
// if absolute path, add as is
if filepath.IsAbs(item) {
paths = append(paths, item[1:])
} else {
for _, source := range app.Spec.GetSources() {
paths = append(paths, filepath.Clean(filepath.Join(source.Path, item)))
}
continue
}
// add the path relative to the source path
paths = append(paths, filepath.Clean(filepath.Join(source.Path, item)))
}
}
return paths
}
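As a rough illustration of how the annotation is expanded for a plain (non-hydrator) source, here is a self-contained sketch reproducing only the path-joining part of GetSourceRefreshPaths; the expand helper and the sample values are hypothetical and not part of the Argo CD API.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// expand mirrors the annotation handling above for a single source path:
// empty items are skipped, absolute items are used as-is minus the leading slash,
// and relative items are joined to the source path.
func expand(annotation, sourcePath string) []string {
	var paths []string
	for _, item := range strings.Split(annotation, ";") {
		if item == "" {
			continue
		}
		if filepath.IsAbs(item) {
			paths = append(paths, item[1:])
			continue
		}
		paths = append(paths, filepath.Clean(filepath.Join(sourcePath, item)))
	}
	return paths
}

func main() {
	fmt.Println(expand(".;dev/deploy;/fullpath/deploy", "source/path"))
	// Output: [source/path source/path/dev/deploy fullpath/deploy]
}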

View File

@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
fileutil "github.com/argoproj/argo-cd/v3/test/fixture/path"
@@ -100,96 +101,38 @@ func TestAbsSymlink(t *testing.T) {
assert.Equal(t, "abslink", oobError.File)
}
func getApp(annotation string, sourcePath string) *v1alpha1.Application {
return &v1alpha1.Application{
func getApp(annotation *string, sourcePath *string) *v1alpha1.Application {
app := &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha1.AnnotationKeyManifestGeneratePaths: annotation,
},
},
Spec: v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{
Path: sourcePath,
},
Name: "test-app",
},
}
if annotation != nil {
app.Annotations = make(map[string]string)
app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths] = *annotation
}
if sourcePath != nil {
app.Spec.Source = &v1alpha1.ApplicationSource{
Path: *sourcePath,
}
}
return app
}
func getMultiSourceApp(annotation string, paths ...string) *v1alpha1.Application {
var sources v1alpha1.ApplicationSources
for _, path := range paths {
sources = append(sources, v1alpha1.ApplicationSource{Path: path})
}
return &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha1.AnnotationKeyManifestGeneratePaths: annotation,
},
func getSourceHydratorApp(annotation *string, drySourcePath string, syncSourcePath string) *v1alpha1.Application {
app := getApp(annotation, nil)
app.Spec.SourceHydrator = &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
Path: drySourcePath,
},
Spec: v1alpha1.ApplicationSpec{
Sources: sources,
SyncSource: v1alpha1.SyncSource{
Path: syncSourcePath,
},
}
}
func Test_AppFilesHaveChanged(t *testing.T) {
t.Parallel()
tests := []struct {
name string
app *v1alpha1.Application
files []string
changeExpected bool
}{
{"default no path", &v1alpha1.Application{}, []string{"README.md"}, true},
{"no files changed", getApp(".", "source/path"), []string{}, true},
{"relative path - matching", getApp(".", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"relative path, multi source - matching #1", getMultiSourceApp(".", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
{"relative path, multi source - matching #2", getMultiSourceApp(".", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"relative path - not matching", getApp(".", "source/path"), []string{"README.md"}, false},
{"relative path, multi source - not matching", getMultiSourceApp(".", "other/path", "unrelated/path"), []string{"README.md"}, false},
{"absolute path - matching", getApp("/source/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"absolute path, multi source - matching #1", getMultiSourceApp("/source/path", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
{"absolute path, multi source - matching #2", getMultiSourceApp("/source/path", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"absolute path - not matching", getApp("/source/path1", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
{"absolute path, multi source - not matching", getMultiSourceApp("/source/path1", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
{"glob path * - matching", getApp("/source/**/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"glob path * - not matching", getApp("/source/**/my-service.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
{"glob path ? - matching", getApp("/source/path/my-deployment-?.yaml", "source/path"), []string{"source/path/my-deployment-0.yaml"}, true},
{"glob path ? - not matching", getApp("/source/path/my-deployment-?.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
{"glob path char range - matching", getApp("/source/path[0-9]/my-deployment.yaml", "source/path"), []string{"source/path1/my-deployment.yaml"}, true},
{"glob path char range - not matching", getApp("/source/path[0-9]/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
{"mixed glob path - matching", getApp("/source/path[0-9]/my-*.yaml", "source/path"), []string{"source/path1/my-deployment.yaml"}, true},
{"mixed glob path - not matching", getApp("/source/path[0-9]/my-*.yaml", "source/path"), []string{"README.md"}, false},
{"two relative paths - matching", getApp(".;../shared", "my-app"), []string{"shared/my-deployment.yaml"}, true},
{"two relative paths, multi source - matching #1", getMultiSourceApp(".;../shared", "my-app", "other/path"), []string{"shared/my-deployment.yaml"}, true},
{"two relative paths, multi source - matching #2", getMultiSourceApp(".;../shared", "my-app", "other/path"), []string{"shared/my-deployment.yaml"}, true},
{"two relative paths - not matching", getApp(".;../shared", "my-app"), []string{"README.md"}, false},
{"two relative paths, multi source - not matching", getMultiSourceApp(".;../shared", "my-app", "other/path"), []string{"README.md"}, false},
{"file relative path - matching", getApp("./my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"file relative path, multi source - matching #1", getMultiSourceApp("./my-deployment.yaml", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
{"file relative path, multi source - matching #2", getMultiSourceApp("./my-deployment.yaml", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"file relative path - not matching", getApp("./my-deployment.yaml", "source/path"), []string{"README.md"}, false},
{"file relative path, multi source - not matching", getMultiSourceApp("./my-deployment.yaml", "source/path", "other/path"), []string{"README.md"}, false},
{"file absolute path - matching", getApp("/source/path/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"file absolute path, multi source - matching #1", getMultiSourceApp("/source/path/my-deployment.yaml", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
{"file absolute path, multi source - matching #2", getMultiSourceApp("/source/path/my-deployment.yaml", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
{"file absolute path - not matching", getApp("/source/path1/README.md", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
{"file absolute path, multi source - not matching", getMultiSourceApp("/source/path1/README.md", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, false},
{"file two relative paths - matching", getApp("./README.md;../shared/my-deployment.yaml", "my-app"), []string{"shared/my-deployment.yaml"}, true},
{"file two relative paths, multi source - matching", getMultiSourceApp("./README.md;../shared/my-deployment.yaml", "my-app", "other-path"), []string{"shared/my-deployment.yaml"}, true},
{"file two relative paths - not matching", getApp(".README.md;../shared/my-deployment.yaml", "my-app"), []string{"kustomization.yaml"}, false},
{"file two relative paths, multi source - not matching", getMultiSourceApp(".README.md;../shared/my-deployment.yaml", "my-app", "other-path"), []string{"kustomization.yaml"}, false},
{"changed file absolute path - matching", getApp(".", "source/path"), []string{"/source/path/my-deployment.yaml"}, true},
}
for _, tt := range tests {
ttc := tt
t.Run(ttc.name, func(t *testing.T) {
t.Parallel()
refreshPaths := GetAppRefreshPaths(ttc.app)
assert.Equal(t, ttc.changeExpected, AppFilesHaveChanged(refreshPaths, ttc.files), "AppFilesHaveChanged()")
})
}
return app
}
func Test_GetAppRefreshPaths(t *testing.T) {
@@ -198,23 +141,64 @@ func Test_GetAppRefreshPaths(t *testing.T) {
tests := []struct {
name string
app *v1alpha1.Application
source v1alpha1.ApplicationSource
expectedPaths []string
}{
{"default no path", &v1alpha1.Application{}, []string{}},
{"relative path", getApp(".", "source/path"), []string{"source/path"}},
{"absolute path - multi source", getMultiSourceApp("/source/path", "source/path", "other/path"), []string{"source/path"}},
{"two relative paths ", getApp(".;../shared", "my-app"), []string{"my-app", "shared"}},
{"file relative path", getApp("./my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}},
{"file absolute path", getApp("/source/path/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}},
{"file two relative paths", getApp("./README.md;../shared/my-deployment.yaml", "my-app"), []string{"my-app/README.md", "shared/my-deployment.yaml"}},
{"glob path", getApp("/source/*/my-deployment.yaml", "source/path"), []string{"source/*/my-deployment.yaml"}},
{"empty path", getApp(".;", "source/path"), []string{"source/path"}},
{
name: "single source without annotation",
app: getApp(nil, ptr.To("source/path")),
source: v1alpha1.ApplicationSource{Path: "source/path"},
expectedPaths: []string{},
},
{
name: "single source with annotation",
app: getApp(ptr.To(".;dev/deploy;other/path"), ptr.To("source/path")),
source: v1alpha1.ApplicationSource{Path: "source/path"},
expectedPaths: []string{"source/path", "source/path/dev/deploy", "source/path/other/path"},
},
{
name: "single source with empty annotation",
app: getApp(ptr.To(".;;"), ptr.To("source/path")),
source: v1alpha1.ApplicationSource{Path: "source/path"},
expectedPaths: []string{"source/path"},
},
{
name: "single source with absolute path annotation",
app: getApp(ptr.To("/fullpath/deploy;other/path"), ptr.To("source/path")),
source: v1alpha1.ApplicationSource{Path: "source/path"},
expectedPaths: []string{"fullpath/deploy", "source/path/other/path"},
},
{
name: "source hydrator sync source without annotation",
app: getSourceHydratorApp(nil, "dry/path", "sync/path"),
source: v1alpha1.ApplicationSource{Path: "sync/path"},
expectedPaths: []string{"sync/path"},
},
{
name: "source hydrator dry source without annotation",
app: getSourceHydratorApp(nil, "dry/path", "sync/path"),
source: v1alpha1.ApplicationSource{Path: "dry/path"},
expectedPaths: []string{},
},
{
name: "source hydrator sync source with annotation",
app: getSourceHydratorApp(ptr.To("deploy"), "dry/path", "sync/path"),
source: v1alpha1.ApplicationSource{Path: "sync/path"},
expectedPaths: []string{"sync/path"},
},
{
name: "source hydrator dry source with annotation",
app: getSourceHydratorApp(ptr.To("deploy"), "dry/path", "sync/path"),
source: v1alpha1.ApplicationSource{Path: "dry/path"},
expectedPaths: []string{"dry/path/deploy"},
},
}
for _, tt := range tests {
ttc := tt
t.Run(ttc.name, func(t *testing.T) {
t.Parallel()
assert.ElementsMatch(t, ttc.expectedPaths, GetAppRefreshPaths(ttc.app), "GetAppRefreshPath()")
assert.ElementsMatch(t, ttc.expectedPaths, GetSourceRefreshPaths(ttc.app, ttc.source), "GetAppRefreshPath()")
})
}
}

View File

@@ -100,6 +100,10 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
_, err = f.clientSet.CoreV1().Secrets(testNamespace).Update(t.Context(), secret, metav1.UpdateOptions{})
require.NoError(t, err)
// Resync informers to ensure the cache reflects the updated secret
err = f.repoBackend.db.settingsMgr.ResyncInformers()
require.NoError(t, err)
// when - try to create the same repository again
output, err := f.repoBackend.CreateRepository(t.Context(), repo)

View File

@@ -3,6 +3,7 @@ package healthz
import (
"fmt"
"net/http"
"time"
log "github.com/sirupsen/logrus"
)
@@ -11,9 +12,13 @@ import (
// ServeHealthCheck relies on the provided function to return an error if unhealthy and nil otherwise.
func ServeHealthCheck(mux *http.ServeMux, f func(r *http.Request) error) {
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
startTs := time.Now()
if err := f(r); err != nil {
w.WriteHeader(http.StatusServiceUnavailable)
log.Errorln(w, err)
log.WithFields(log.Fields{
"duration": time.Since(startTs),
"component": "healthcheck",
}).WithError(err).Error("Error serving health check request")
} else {
fmt.Fprintln(w, "ok")
}
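A minimal usage sketch of ServeHealthCheck with the new logging behavior; the import path and the readiness flag are assumptions made for illustration, not taken from a specific Argo CD component.

package main

import (
	"errors"
	"net/http"

	// Assumed import path for the healthz package shown above.
	"github.com/argoproj/argo-cd/v3/util/healthz"
)

func main() {
	mux := http.NewServeMux()
	ready := false // hypothetical readiness flag
	// Registers /healthz; failures are now logged with component=healthcheck,
	// the request duration, and the original error instead of the broken Errorln call.
	healthz.ServeHealthCheck(mux, func(_ *http.Request) error {
		if !ready {
			return errors.New("not ready yet")
		}
		return nil
	})
	_ = http.ListenAndServe(":8080", mux)
}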

View File

@@ -5,16 +5,22 @@ import (
"net"
"net/http"
"testing"
"time"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestHealthCheck(t *testing.T) {
sentinel := false
lc := &net.ListenConfig{}
ctx := t.Context()
svcErrMsg := "This is a dummy error"
serve := func(c chan<- string) {
// listen on first available dynamic (unprivileged) port
listener, err := net.Listen("tcp", ":0")
listener, err := lc.Listen(ctx, "tcp", ":0")
if err != nil {
panic(err)
}
@@ -25,7 +31,7 @@ func TestHealthCheck(t *testing.T) {
mux := http.NewServeMux()
ServeHealthCheck(mux, func(_ *http.Request) error {
if sentinel {
return errors.New("This is a dummy error")
return errors.New(svcErrMsg)
}
return nil
})
@@ -47,7 +53,23 @@ func TestHealthCheck(t *testing.T) {
require.Equalf(t, http.StatusOK, resp.StatusCode, "Was expecting status code 200 from health check, but got %d instead", resp.StatusCode)
sentinel = true
hook := test.NewGlobal()
resp, _ = http.Get(server + "/healthz")
require.Equalf(t, http.StatusServiceUnavailable, resp.StatusCode, "Was expecting status code 503 from health check, but got %d instead", resp.StatusCode)
assert.NotEmpty(t, hook.Entries, "Was expecting at least one log entry from health check, but got none")
expectedMsg := "Error serving health check request"
var foundEntry log.Entry
for _, entry := range hook.Entries {
if entry.Level == log.ErrorLevel &&
entry.Message == expectedMsg {
foundEntry = entry
break
}
}
require.NotEmpty(t, foundEntry, "Expected an error message '%s', but it wasn't found", expectedMsg)
actualErr, ok := foundEntry.Data["error"].(error)
require.True(t, ok, "Expected 'error' field to contain an error, but it doesn't")
assert.Equal(t, svcErrMsg, actualErr.Error(), "expected original error message '"+svcErrMsg+"', but got '"+actualErr.Error()+"'")
assert.Greater(t, foundEntry.Data["duration"].(time.Duration), time.Duration(0))
}

View File

@@ -430,7 +430,7 @@ func isContentLayer(mediaType string) bool {
func isCompressedLayer(mediaType string) bool {
// TODO: Is zstd something which is used in the wild? For now let's stick to these suffixes
return strings.HasSuffix(mediaType, "tar+gzip") || strings.HasSuffix(mediaType, "tar")
return strings.HasSuffix(mediaType, "tar+gzip") || strings.HasSuffix(mediaType, "tar.gzip") || strings.HasSuffix(mediaType, "tar")
}
func createTarFile(from, to string) error {
@@ -531,7 +531,7 @@ func (s *compressedLayerExtracterStore) Push(ctx context.Context, desc imagev1.D
}
defer os.RemoveAll(srcDir)
if strings.HasSuffix(desc.MediaType, "tar+gzip") {
if strings.HasSuffix(desc.MediaType, "tar+gzip") || strings.HasSuffix(desc.MediaType, "tar.gzip") {
err = files.Untgz(srcDir, content, s.maxSize, false)
} else {
err = files.Untar(srcDir, content, s.maxSize, false)
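To make the accepted suffixes concrete, here is a standalone copy of the updated check (illustrative only; the sample media types are chosen for the example):

package main

import (
	"fmt"
	"strings"
)

// isCompressedLayer is a standalone copy of the updated suffix check above.
func isCompressedLayer(mediaType string) bool {
	return strings.HasSuffix(mediaType, "tar+gzip") || strings.HasSuffix(mediaType, "tar.gzip") || strings.HasSuffix(mediaType, "tar")
}

func main() {
	for _, mt := range []string{
		"application/vnd.oci.image.layer.v1.tar+gzip",       // true: gzip layer, extracted with Untgz
		"application/vnd.docker.image.rootfs.diff.tar.gzip", // true: newly accepted gzip suffix
		"application/vnd.oci.image.layer.v1.tar",            // true: plain tar, extracted with Untar
	} {
		fmt.Println(mt, isCompressedLayer(mt))
	}
}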

View File

@@ -254,6 +254,31 @@ func Test_nativeOCIClient_Extract(t *testing.T) {
disableManifestMaxExtractedSize: false,
},
},
{
name: "extraction with docker rootfs tar.gzip layer",
fields: fields{
allowedMediaTypes: []string{"application/vnd.docker.image.rootfs.diff.tar.gzip"},
},
args: args{
digestFunc: func(store *memory.Store) string {
layerBlob := createGzippedTarWithContent(t, "foo.yaml", "some content")
return generateManifest(t, store, layerConf{content.NewDescriptorFromBytes("application/vnd.docker.image.rootfs.diff.tar.gzip", layerBlob), layerBlob})
},
postValidationFunc: func(_, path string, _ Client, _ fields, _ args) {
manifestDir, err := os.ReadDir(path)
require.NoError(t, err)
require.Len(t, manifestDir, 1)
require.Equal(t, "foo.yaml", manifestDir[0].Name())
f, err := os.Open(filepath.Join(path, manifestDir[0].Name()))
require.NoError(t, err)
contents, err := io.ReadAll(f)
require.NoError(t, err)
require.Equal(t, "some content", string(contents))
},
manifestMaxExtractedSize: 1000,
disableManifestMaxExtractedSize: false,
},
},
{
name: "extraction with standard gzip layer using cache",
fields: fields{

View File

@@ -1286,13 +1286,13 @@ func (mgr *SettingsManager) GetSettings() (*ArgoCDSettings, error) {
var settings ArgoCDSettings
var errs []error
updateSettingsFromConfigMap(&settings, argoCDCM)
if err := mgr.updateSettingsFromSecret(&settings, argoCDSecret, secrets); err != nil {
errs = append(errs, err)
}
if len(errs) > 0 {
return &settings, errors.Join(errs...)
}
updateSettingsFromConfigMap(&settings, argoCDCM)
return &settings, nil
}

View File

@@ -330,23 +330,23 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload any) {
appIf := a.appsLister.Applications(nsFilter)
apps, err := appIf.List(labels.Everything())
if err != nil {
log.Warnf("Failed to list applications: %v", err)
log.Errorf("Failed to list applications: %v", err)
return
}
installationID, err := a.settingsSrc.GetInstallationID()
if err != nil {
log.Warnf("Failed to get installation ID: %v", err)
log.Errorf("Failed to get installation ID: %v", err)
return
}
trackingMethod, err := a.settingsSrc.GetTrackingMethod()
if err != nil {
log.Warnf("Failed to get trackingMethod: %v", err)
log.Errorf("Failed to get trackingMethod: %v", err)
return
}
appInstanceLabelKey, err := a.settingsSrc.GetAppInstanceLabelKey()
if err != nil {
log.Warnf("Failed to get appInstanceLabelKey: %v", err)
log.Errorf("Failed to get appInstanceLabelKey: %v", err)
return
}
@@ -362,41 +362,47 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload any) {
for _, webURL := range webURLs {
repoRegexp, err := GetWebURLRegex(webURL)
if err != nil {
log.Warnf("Failed to get repoRegexp: %s", err)
log.Errorf("Failed to get repoRegexp: %s", err)
continue
}
// iterate over apps and check if any files specified in their sources have changed
for _, app := range filteredApps {
// get all sources, including sync source and dry source if source hydrator is configured
sources := app.Spec.GetSources()
if app.Spec.SourceHydrator != nil {
drySource := app.Spec.SourceHydrator.GetDrySource()
if sourceRevisionHasChanged(drySource, revision, touchedHead) && sourceUsesURL(drySource, webURL, repoRegexp) {
refreshPaths := path.GetAppRefreshPaths(&app)
if path.AppFilesHaveChanged(refreshPaths, changedFiles) {
namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.Namespace)
log.Infof("webhook trigger refresh app to hydrate '%s'", app.Name)
_, err = argo.RefreshApp(namespacedAppInterface, app.Name, v1alpha1.RefreshTypeNormal, true)
if err != nil {
log.Warnf("Failed to hydrate app '%s' for controller reprocessing: %v", app.Name, err)
continue
}
}
}
// we already have sync source, so add dry source if source hydrator is configured
sources = append(sources, app.Spec.SourceHydrator.GetDrySource())
}
for _, source := range app.Spec.GetSources() {
// iterate over all sources and check if any files specified in refresh paths have changed
for _, source := range sources {
if sourceRevisionHasChanged(source, revision, touchedHead) && sourceUsesURL(source, webURL, repoRegexp) {
refreshPaths := path.GetAppRefreshPaths(&app)
refreshPaths := path.GetSourceRefreshPaths(&app, source)
if path.AppFilesHaveChanged(refreshPaths, changedFiles) {
namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.Namespace)
_, err = argo.RefreshApp(namespacedAppInterface, app.Name, v1alpha1.RefreshTypeNormal, true)
if err != nil {
log.Warnf("Failed to refresh app '%s' for controller reprocessing: %v", app.Name, err)
continue
hydrate := false
if app.Spec.SourceHydrator != nil {
drySource := app.Spec.SourceHydrator.GetDrySource()
if (&source).Equals(&drySource) {
hydrate = true
}
}
// No need to refresh multiple times if multiple sources match.
break
// refresh paths have changed, so we need to refresh the app
log.Infof("refreshing app '%s' from webhook", app.Name)
if hydrate {
// log if we need to hydrate the app
log.Infof("webhook trigger refresh app to hydrate '%s'", app.Name)
}
namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.Namespace)
if _, err := argo.RefreshApp(namespacedAppInterface, app.Name, v1alpha1.RefreshTypeNormal, hydrate); err != nil {
log.Errorf("Failed to refresh app '%s': %v", app.Name, err)
}
break // we don't need to check other sources
} else if change.shaBefore != "" && change.shaAfter != "" {
if err := a.storePreviouslyCachedManifests(&app, change, trackingMethod, appInstanceLabelKey, installationID); err != nil {
log.Warnf("Failed to store cached manifests of previous revision for app '%s': %v", app.Name, err)
// update the cached manifests with the new revision cache key
if err := a.storePreviouslyCachedManifests(&app, change, trackingMethod, appInstanceLabelKey, installationID, source); err != nil {
log.Errorf("Failed to store cached manifests of previous revision for app '%s': %v", app.Name, err)
}
}
}
@@ -449,7 +455,7 @@ func getURLRegex(originalURL string, regexpFormat string) (*regexp.Regexp, error
return repoRegexp, nil
}
func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Application, change changeInfo, trackingMethod string, appInstanceLabelKey string, installationID string) error {
func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Application, change changeInfo, trackingMethod string, appInstanceLabelKey string, installationID string, source v1alpha1.ApplicationSource) error {
destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, a.db)
if err != nil {
return fmt.Errorf("error validating destination: %w", err)
@@ -472,7 +478,7 @@ func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Appl
if err != nil {
return fmt.Errorf("error getting ref sources: %w", err)
}
source := app.Spec.GetSource()
cache.LogDebugManifestCacheKeyFields("moving manifests cache", "webhook app revision changed", change.shaBefore, &source, refSources, &clusterInfo, app.Spec.Destination.Namespace, trackingMethod, appInstanceLabelKey, app.Name, nil)
if err := a.repoCache.SetNewRevisionManifests(change.shaAfter, change.shaBefore, &source, refSources, &clusterInfo, app.Spec.Destination.Namespace, trackingMethod, appInstanceLabelKey, app.Name, nil, installationID); err != nil {

View File

@@ -44,6 +44,7 @@ import (
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned/fake"
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
"github.com/argoproj/argo-cd/v3/reposerver/cache"
cacheutil "github.com/argoproj/argo-cd/v3/util/cache"
"github.com/argoproj/argo-cd/v3/util/settings"
@@ -182,64 +183,6 @@ func TestAzureDevOpsCommitEvent(t *testing.T) {
hook.Reset()
}
// TestGitHubCommitEvent_MultiSource_Refresh makes sure that a webhook will refresh a multi-source app when at least
// one source matches.
func TestGitHubCommitEvent_MultiSource_Refresh(t *testing.T) {
hook := test.NewGlobal()
var patched bool
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patchAction := action.(kubetesting.PatchAction)
assert.Equal(t, "app-to-refresh", patchAction.GetName())
patched = true
return true, nil, nil
}
h := NewMockHandler(&reactorDef{"patch", "applications", reaction}, []string{}, &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "app-to-refresh",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/some/unrelated-repo",
Path: ".",
},
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: ".",
},
},
},
}, &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "app-to-ignore",
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/some/unrelated-repo",
Path: ".",
},
},
},
},
)
req := httptest.NewRequest(http.MethodPost, "/api/webhook", http.NoBody)
req.Header.Set("X-GitHub-Event", "push")
eventJSON, err := os.ReadFile("testdata/github-commit-event.json")
require.NoError(t, err)
req.Body = io.NopCloser(bytes.NewReader(eventJSON))
w := httptest.NewRecorder()
h.Handler(w, req)
close(h.queue)
h.Wait()
assert.Equal(t, http.StatusOK, w.Code)
expectedLogResult := "Requested app 'app-to-refresh' refresh"
assert.Equal(t, expectedLogResult, hook.LastEntry().Message)
assert.True(t, patched)
hook.Reset()
}
// TestGitHubCommitEvent_AppsInOtherNamespaces makes sure that webhooks properly find apps in the configured set of
// allowed namespaces when Apps are allowed in any namespace
func TestGitHubCommitEvent_AppsInOtherNamespaces(t *testing.T) {
@@ -338,72 +281,6 @@ func TestGitHubCommitEvent_AppsInOtherNamespaces(t *testing.T) {
hook.Reset()
}
// TestGitHubCommitEvent_Hydrate makes sure that a webhook will hydrate an app when dry source changed.
func TestGitHubCommitEvent_Hydrate(t *testing.T) {
hook := test.NewGlobal()
var patched bool
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
patchAction := action.(kubetesting.PatchAction)
assert.Equal(t, "app-to-hydrate", patchAction.GetName())
patched = true
return true, nil, nil
}
h := NewMockHandler(&reactorDef{"patch", "applications", reaction}, []string{}, &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "app-to-hydrate",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSpec{
SourceHydrator: &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
RepoURL: "https://github.com/jessesuen/test-repo",
TargetRevision: "HEAD",
Path: ".",
},
SyncSource: v1alpha1.SyncSource{
TargetBranch: "environments/dev",
Path: ".",
},
HydrateTo: nil,
},
},
}, &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "app-to-ignore",
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/some/unrelated-repo",
Path: ".",
},
},
},
},
)
req := httptest.NewRequest(http.MethodPost, "/api/webhook", http.NoBody)
req.Header.Set("X-GitHub-Event", "push")
eventJSON, err := os.ReadFile("testdata/github-commit-event.json")
require.NoError(t, err)
req.Body = io.NopCloser(bytes.NewReader(eventJSON))
w := httptest.NewRecorder()
h.Handler(w, req)
close(h.queue)
h.Wait()
assert.Equal(t, http.StatusOK, w.Code)
assert.True(t, patched)
logMessages := make([]string, 0, len(hook.Entries))
for _, entry := range hook.Entries {
logMessages = append(logMessages, entry.Message)
}
assert.Contains(t, logMessages, "webhook trigger refresh app to hydrate 'app-to-hydrate'")
assert.NotContains(t, logMessages, "webhook trigger refresh app to hydrate 'app-to-ignore'")
hook.Reset()
}
func TestGitHubTagEvent(t *testing.T) {
hook := test.NewGlobal()
h := NewMockHandler(nil, []string{})
@@ -646,7 +523,8 @@ func Test_affectedRevisionInfo_appRevisionHasChanged(t *testing.T) {
// The payload's "push.changes[0].new.name" member seems to only have the branch name (based on the example payload).
// https://support.atlassian.com/bitbucket-cloud/docs/event-payloads/#EventPayloads-Push
var pl bitbucket.RepoPushPayload
_ = json.Unmarshal([]byte(fmt.Sprintf(`{"push":{"changes":[{"new":{"name":%q}}]}}`, branchName)), &pl)
err := json.Unmarshal([]byte(fmt.Sprintf(`{"push":{"changes":[{"new":{"name":%q}}]}}`, branchName)), &pl)
require.NoError(t, err)
return pl
}
@@ -878,6 +756,463 @@ func TestGitHubCommitEventMaxPayloadSize(t *testing.T) {
hook.Reset()
}
func TestHandleEvent(t *testing.T) {
t.Parallel()
tests := []struct {
name string
app *v1alpha1.Application
changedFile string // file that was changed in the webhook payload
hasRefresh bool // application has refresh annotation applied
hasHydrate bool // application has hydrate annotation applied
updateCache bool // cache should be updated with the new revision
}{
{
name: "single source without annotation - always refreshes",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "source/path",
TargetRevision: "HEAD",
},
},
},
},
changedFile: "source/path/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "single source with annotation - matching file triggers refresh",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "deploy",
},
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "source/path",
TargetRevision: "HEAD",
},
},
},
},
changedFile: "source/path/deploy/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "single source with annotation - non-matching file updates cache",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "manifests",
},
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "source/path",
TargetRevision: "HEAD",
},
},
},
},
changedFile: "source/path/other/app.yaml",
hasRefresh: false,
hasHydrate: false,
updateCache: true,
},
{
name: "single source with multiple paths annotation - matching subpath triggers refresh",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "manifests;dev/deploy;other/path",
},
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "source/path",
TargetRevision: "HEAD",
},
},
},
},
changedFile: "source/path/dev/deploy/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "multi-source without annotation - always refreshes",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "helm-charts",
TargetRevision: "HEAD",
},
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "ksapps",
TargetRevision: "HEAD",
},
},
},
},
changedFile: "ksapps/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "multi-source with annotation - matching file triggers refresh",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "components",
},
},
Spec: v1alpha1.ApplicationSpec{
Sources: v1alpha1.ApplicationSources{
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "helm-charts",
TargetRevision: "HEAD",
},
{
RepoURL: "https://github.com/jessesuen/test-repo",
Path: "ksapps",
TargetRevision: "HEAD",
},
},
},
},
changedFile: "ksapps/components/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "source hydrator sync source without annotation - refreshes when sync path matches",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSpec{
SourceHydrator: &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
RepoURL: "https://github.com/jessesuen/test-repo",
TargetRevision: "HEAD",
Path: "dry/path",
},
SyncSource: v1alpha1.SyncSource{
TargetBranch: "master",
Path: "sync/path",
},
},
},
},
changedFile: "sync/path/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "source hydrator dry source without annotation - always refreshes and hydrates",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSpec{
SourceHydrator: &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
RepoURL: "https://github.com/jessesuen/test-repo",
TargetRevision: "HEAD",
Path: "dry/path",
},
SyncSource: v1alpha1.SyncSource{
TargetBranch: "master",
Path: "sync/path",
},
},
},
},
changedFile: "other/path/app.yaml",
hasRefresh: true,
hasHydrate: true,
updateCache: false,
},
{
name: "source hydrator sync source with annotation - refresh only",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "deploy",
},
},
Spec: v1alpha1.ApplicationSpec{
SourceHydrator: &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
RepoURL: "https://github.com/jessesuen/test-repo",
TargetRevision: "HEAD",
Path: "dry/path",
},
SyncSource: v1alpha1.SyncSource{
TargetBranch: "master",
Path: "sync/path",
},
},
},
},
changedFile: "sync/path/deploy/app.yaml",
hasRefresh: true,
hasHydrate: false,
updateCache: false,
},
{
name: "source hydrator dry source with annotation - refresh and hydrate",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "deploy",
},
},
Spec: v1alpha1.ApplicationSpec{
SourceHydrator: &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
RepoURL: "https://github.com/jessesuen/test-repo",
TargetRevision: "HEAD",
Path: "dry/path",
},
SyncSource: v1alpha1.SyncSource{
TargetBranch: "master",
Path: "sync/path",
},
},
},
},
changedFile: "dry/path/deploy/app.yaml",
hasRefresh: true,
hasHydrate: true,
updateCache: false,
},
{
name: "source hydrator dry source with annotation - non-matching file updates cache",
app: &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "test-app",
Namespace: "argocd",
Annotations: map[string]string{
"argocd.argoproj.io/manifest-generate-paths": "deploy",
},
},
Spec: v1alpha1.ApplicationSpec{
SourceHydrator: &v1alpha1.SourceHydrator{
DrySource: v1alpha1.DrySource{
RepoURL: "https://github.com/jessesuen/test-repo",
TargetRevision: "HEAD",
Path: "dry/path",
},
SyncSource: v1alpha1.SyncSource{
TargetBranch: "master",
Path: "sync/path",
},
},
},
},
changedFile: "dry/path/other/app.yaml",
hasRefresh: false,
hasHydrate: false,
updateCache: true,
},
}
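// Run each case as a parallel subtest. The loop variable is copied into ttc so
// parallel subtests don't share it, and every case gets its own fake clientset,
// caches, and mock DB.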
for _, tt := range tests {
ttc := tt
t.Run(ttc.name, func(t *testing.T) {
t.Parallel()
var patchData []byte
var patched bool
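// reaction records any patch issued against the Application so the test can
// later assert on the refresh/hydrate annotations it carries.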
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if action.GetVerb() == "patch" {
patchAction := action.(kubetesting.PatchAction)
patchData = patchAction.GetPatch()
patched = true
}
return true, nil, nil
}
// Setup cache
inMemoryCache := cacheutil.NewInMemoryCache(1 * time.Hour)
cacheClient := cacheutil.NewCache(inMemoryCache)
repoCache := cache.NewCache(
cacheClient,
1*time.Minute,
1*time.Minute,
10*time.Second,
)
// Pre-populate cache with beforeSHA if we're testing cache updates
if ttc.updateCache {
var source *v1alpha1.ApplicationSource
if ttc.app.Spec.SourceHydrator != nil {
drySource := ttc.app.Spec.SourceHydrator.GetDrySource()
source = &drySource
} else if len(ttc.app.Spec.Sources) > 0 {
source = &ttc.app.Spec.Sources[0]
}
if source != nil {
setupTestCache(t, repoCache, ttc.app.Name, source, []string{"test-manifest"})
}
}
// Setup server cache with cluster info
serverCache := servercache.NewCache(appstate.NewCache(cacheClient, time.Minute), time.Minute, time.Minute)
mockDB := &mocks.ArgoDB{}
// Set destination if not present (required for cache updates)
if ttc.app.Spec.Destination.Server == "" {
ttc.app.Spec.Destination.Server = testClusterURL
}
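// Stub cluster lookups; Maybe() lets GetCluster be called zero or more times,
// since not every case reaches the cluster info path.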
mockDB.EXPECT().GetCluster(mock.Anything, testClusterURL).Return(&v1alpha1.Cluster{
Server: testClusterURL,
Info: v1alpha1.ClusterInfo{
ServerVersion: "1.28.0",
ConnectionState: v1alpha1.ConnectionState{Status: v1alpha1.ConnectionStatusSuccessful},
APIVersions: []string{},
},
}, nil).Maybe()
err := serverCache.SetClusterInfo(testClusterURL, &v1alpha1.ClusterInfo{
ServerVersion: "1.28.0",
ConnectionState: v1alpha1.ConnectionState{Status: v1alpha1.ConnectionStatusSuccessful},
APIVersions: []string{},
})
require.NoError(t, err)
// Create handler with reaction
appClientset := appclientset.NewSimpleClientset(ttc.app)
defaultReactor := appClientset.ReactionChain[0]
appClientset.ReactionChain = nil
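// Rebuild the reaction chain: keep the default reactor for list requests so the
// seeded app is still returned, and intercept application patches with the
// reaction defined above.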
appClientset.AddReactor("list", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
return defaultReactor.React(action)
})
appClientset.AddReactor("patch", "applications", reaction)
h := NewHandler(
"argocd",
[]string{},
10,
appClientset,
&fakeAppsLister{clientset: appClientset},
&settings.ArgoCDSettings{},
&fakeSettingsSrc{},
repoCache,
serverCache,
mockDB,
int64(50)*1024*1024,
)
// Create payload with the changed file
payload := createTestPayload(ttc.changedFile)
req := httptest.NewRequest(http.MethodPost, "/api/webhook", http.NoBody)
req.Header.Set("X-GitHub-Event", "push")
req.Body = io.NopCloser(bytes.NewReader(payload))
w := httptest.NewRecorder()
h.Handler(w, req)
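// Close the queue and wait for the handler's workers to drain it before asserting.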
close(h.queue)
h.Wait()
assert.Equal(t, http.StatusOK, w.Code)
// Verify refresh behavior
assert.Equal(t, ttc.hasRefresh, patched, "patch status mismatch for test: %s", ttc.name)
if patched && patchData != nil {
verifyAnnotations(t, patchData, ttc.hasRefresh, ttc.hasHydrate)
}
// Verify cache update behavior
if ttc.updateCache {
var source *v1alpha1.ApplicationSource
if ttc.app.Spec.SourceHydrator != nil {
drySource := ttc.app.Spec.SourceHydrator.GetDrySource()
source = &drySource
} else if len(ttc.app.Spec.Sources) > 0 {
source = &ttc.app.Spec.Sources[0]
}
if source != nil {
// Verify cache was updated with afterSHA
clusterInfo := &mockClusterInfo{}
var afterManifests cache.CachedManifestResponse
err := repoCache.GetManifests(testAfterSHA, source, nil, clusterInfo, "", "", testAppLabelKey, ttc.app.Name, &afterManifests, nil, "")
require.NoError(t, err, "cache should be updated with afterSHA")
if err == nil {
assert.Equal(t, testAfterSHA, afterManifests.ManifestResponse.Revision, "cached revision should match afterSHA")
}
}
}
})
}
}
// createTestPayload creates a GitHub push event payload with the specified changed file
func createTestPayload(changedFile string) []byte {
payload := fmt.Sprintf(`{
"ref": "refs/heads/master",
"before": "%s",
"after": "%s",
"repository": {
"html_url": "https://github.com/jessesuen/test-repo",
"default_branch": "master"
},
"commits": [
{
"added": [],
"modified": ["%s"],
"removed": []
}
]
}`, testBeforeSHA, testAfterSHA, changedFile)
return []byte(payload)
}
func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
@@ -922,10 +1257,9 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
"oldHash": "abcdef",
"newHash": "ghijkl",
})
-if err != nil {
-require.NoError(t, err)
-}
-_ = json.Unmarshal(doc.Bytes(), &pl)
+require.NoError(t, err)
+err = json.Unmarshal(doc.Bytes(), &pl)
+require.NoError(t, err)
return pl
}
@@ -1238,3 +1572,72 @@ func getDiffstatResponderFn() func(req *http.Request) (*http.Response, error) {
return resp, nil
}
}
// mockClusterInfo implements cache.ClusterRuntimeInfo for testing
type mockClusterInfo struct{}
func (m *mockClusterInfo) GetApiVersions() []string { return []string{} } //nolint:revive // interface method name
func (m *mockClusterInfo) GetKubeVersion() string { return "1.28.0" }
// Common test constants
const (
testBeforeSHA = "d5c1ffa8e294bc18c639bfb4e0df499251034414"
testAfterSHA = "63738bb582c8b540af7bcfc18f87c575c3ed66e0"
testClusterURL = "https://kubernetes.default.svc"
testAppLabelKey = "mycompany.com/appname"
)
// verifyAnnotations checks whether the expected refresh/hydrate annotations are present in the patch data
func verifyAnnotations(t *testing.T, patchData []byte, expectRefresh bool, expectHydrate bool) {
t.Helper()
if patchData == nil {
if expectRefresh {
t.Error("expected app to be patched but patchData is nil")
}
return
}
var patchMap map[string]any
err := json.Unmarshal(patchData, &patchMap)
require.NoError(t, err)
metadata, hasMetadata := patchMap["metadata"].(map[string]any)
require.True(t, hasMetadata, "patch should have metadata")
annotations, hasAnnotations := metadata["annotations"].(map[string]any)
require.True(t, hasAnnotations, "patch should have annotations")
// Check refresh annotation
refreshValue, hasRefresh := annotations["argocd.argoproj.io/refresh"]
if expectRefresh {
assert.True(t, hasRefresh, "should have refresh annotation")
assert.Equal(t, "normal", refreshValue, "refresh annotation should be 'normal'")
} else {
assert.False(t, hasRefresh, "should not have refresh annotation")
}
// Check hydrate annotation
hydrateValue, hasHydrate := annotations["argocd.argoproj.io/hydrate"]
if expectHydrate {
assert.True(t, hasHydrate, "should have hydrate annotation")
assert.Equal(t, "normal", hydrateValue, "hydrate annotation should be 'normal'")
} else {
assert.False(t, hasHydrate, "should not have hydrate annotation")
}
}
// setupTestCache seeds the given repo cache with dummy manifests for the source at testBeforeSHA
func setupTestCache(t *testing.T, repoCache *cache.Cache, appName string, source *v1alpha1.ApplicationSource, manifests []string) {
t.Helper()
clusterInfo := &mockClusterInfo{}
dummyManifests := &cache.CachedManifestResponse{
ManifestResponse: &apiclient.ManifestResponse{
Revision: testBeforeSHA,
Manifests: manifests,
Namespace: "",
Server: testClusterURL,
},
}
err := repoCache.SetManifests(testBeforeSHA, source, nil, clusterInfo, "", "", testAppLabelKey, appName, dummyManifests, nil, "")
require.NoError(t, err)
}