mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-02-24 11:38:46 +01:00
Compare commits
39 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
48549a2035 | ||
|
|
10c3fd02f4 | ||
|
|
ca08f90e96 | ||
|
|
1f03b27fd5 | ||
|
|
9c128e2d4c | ||
|
|
75eddbd910 | ||
|
|
65b029342d | ||
|
|
2ff406ae33 | ||
|
|
76fc92f655 | ||
|
|
ad117b88a6 | ||
|
|
508da9c791 | ||
|
|
20866f4557 | ||
|
|
e3b108b738 | ||
|
|
c56f4400f2 | ||
|
|
e9d03a633e | ||
|
|
030b4f982b | ||
|
|
fafbd44489 | ||
|
|
d7d9674e33 | ||
|
|
e6f54030f0 | ||
|
|
b4146969ed | ||
|
|
51c6375130 | ||
|
|
b67eb40a45 | ||
|
|
8a0633b74a | ||
|
|
0d4f505954 | ||
|
|
2b6251dfed | ||
|
|
8f903c3a11 | ||
|
|
8d0dde1388 | ||
|
|
784f62ca6d | ||
|
|
33b5043405 | ||
|
|
88fe638aff | ||
|
|
a29703877e | ||
|
|
95e7cdb16f | ||
|
|
122f4db3db | ||
|
|
2d65b26420 | ||
|
|
0ace9bb9a3 | ||
|
|
6398ec3dcb | ||
|
|
732b16fb2a | ||
|
|
024c7e6020 | ||
|
|
26b7fb2c61 |
8
.github/workflows/ci-build.yaml
vendored
8
.github/workflows/ci-build.yaml
vendored
@@ -14,7 +14,7 @@ on:
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.0'
|
||||
GOLANG_VERSION: '1.25.6'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -414,14 +414,14 @@ jobs:
|
||||
# latest: true means that this version mush upload the coverage report to codecov.io
|
||||
# We designate the latest version because we only collect code coverage for that version.
|
||||
k3s:
|
||||
- version: v1.33.1
|
||||
- version: v1.34.2
|
||||
latest: true
|
||||
- version: v1.33.1
|
||||
latest: false
|
||||
- version: v1.32.1
|
||||
latest: false
|
||||
- version: v1.31.0
|
||||
latest: false
|
||||
- version: v1.30.4
|
||||
latest: false
|
||||
needs:
|
||||
- build-go
|
||||
- changes
|
||||
|
||||
4
.github/workflows/image.yaml
vendored
4
.github/workflows/image.yaml
vendored
@@ -53,7 +53,7 @@ jobs:
|
||||
with:
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.0
|
||||
go-version: 1.25.6
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: false
|
||||
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.0
|
||||
go-version: 1.25.6
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
4
.github/workflows/release.yaml
vendored
4
.github/workflows/release.yaml
vendored
@@ -11,7 +11,7 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.0' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.25.6' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
@@ -25,7 +25,7 @@ jobs:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.0
|
||||
go-version: 1.25.6
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
9
.github/workflows/renovate.yaml
vendored
9
.github/workflows/renovate.yaml
vendored
@@ -19,7 +19,14 @@ jobs:
|
||||
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # 6.0.1
|
||||
|
||||
# Some codegen commands require Go to be setup
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.6
|
||||
|
||||
- name: Self-hosted Renovate
|
||||
uses: renovatebot/github-action@f8af9272cd94a4637c29f60dea8731afd3134473 #43.0.12
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -20,6 +20,7 @@ node_modules/
|
||||
.kube/
|
||||
./test/cmp/*.sock
|
||||
.envrc.remote
|
||||
.mirrord/
|
||||
.*.swp
|
||||
rerunreport.txt
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS builder
|
||||
FROM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6
|
||||
FROM docker.io/library/golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@ const (
|
||||
var defaultPreservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
argov1alpha1.AnnotationKeyHydrate,
|
||||
}
|
||||
|
||||
type deleteInOrder struct {
|
||||
@@ -651,8 +652,9 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
Watches(
|
||||
&corev1.Secret{},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: r.ApplicationSetNamespaces,
|
||||
}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
@@ -589,6 +589,72 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that hydrate annotation is preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
Annotations: map[string]string{
|
||||
"annot-key": "annot-value",
|
||||
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
Annotations: map[string]string{
|
||||
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that configured preserved annotations are preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -22,8 +23,9 @@ import (
|
||||
// requeue any related ApplicationSets.
|
||||
type clusterSecretEventHandler struct {
|
||||
// handler.EnqueueRequestForOwner
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
ApplicationSetNamespaces []string
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
@@ -68,6 +70,10 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Contex
|
||||
|
||||
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
|
||||
for _, appSet := range appSetList.Items {
|
||||
if !utils.IsNamespaceAllowed(h.ApplicationSetNamespaces, appSet.GetNamespace()) {
|
||||
// Ignore it as not part of the allowed list of namespaces in which to watch Appsets
|
||||
continue
|
||||
}
|
||||
foundClusterGenerator := false
|
||||
for _, generator := range appSet.Spec.Generators {
|
||||
if generator.Clusters != nil {
|
||||
|
||||
@@ -137,7 +137,7 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
@@ -171,9 +171,37 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cluster generators in other namespaces should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "my-namespace-not-allowed",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
@@ -552,8 +580,9 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
|
||||
|
||||
handler := &clusterSecretEventHandler{
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: []string{"argocd"},
|
||||
}
|
||||
|
||||
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
|
||||
|
||||
@@ -21,8 +21,8 @@ import (
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
const gitAttributesContents = `*/README.md linguist-generated=true
|
||||
*/hydrator.metadata linguist-generated=true`
|
||||
const gitAttributesContents = `**/README.md linguist-generated=true
|
||||
**/hydrator.metadata linguist-generated=true`
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
|
||||
@@ -7,8 +7,10 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -234,6 +236,74 @@ func TestWriteGitAttributes(t *testing.T) {
|
||||
gitAttributesPath := filepath.Join(root.Name(), ".gitattributes")
|
||||
gitAttributesBytes, err := os.ReadFile(gitAttributesPath)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/hydrator.metadata linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "hydrator.metadata linguist-generated=true")
|
||||
}
|
||||
|
||||
func TestWriteGitAttributes_MatchesAllDepths(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
err := writeGitAttributes(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The gitattributes pattern needs to match files at all depths:
|
||||
// - hydrator.metadata (root level)
|
||||
// - path1/hydrator.metadata (one level deep)
|
||||
// - path1/nested/deep/hydrator.metadata (multiple levels deep)
|
||||
// Same for README.md files
|
||||
//
|
||||
// The pattern "**/hydrator.metadata" matches at any depth including root
|
||||
// The pattern "*/hydrator.metadata" only matches exactly one directory level deep
|
||||
|
||||
// Test actual Git behavior using git check-attr
|
||||
// Initialize a git repo
|
||||
ctx := t.Context()
|
||||
repoPath := root.Name()
|
||||
cmd := exec.CommandContext(ctx, "git", "init")
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "Failed to init git repo: %s", string(output))
|
||||
|
||||
// Test files at different depths
|
||||
testCases := []struct {
|
||||
path string
|
||||
shouldMatch bool
|
||||
description string
|
||||
}{
|
||||
{"hydrator.metadata", true, "root level hydrator.metadata"},
|
||||
{"README.md", true, "root level README.md"},
|
||||
{"path1/hydrator.metadata", true, "one level deep hydrator.metadata"},
|
||||
{"path1/README.md", true, "one level deep README.md"},
|
||||
{"path1/nested/hydrator.metadata", true, "two levels deep hydrator.metadata"},
|
||||
{"path1/nested/README.md", true, "two levels deep README.md"},
|
||||
{"path1/nested/deep/hydrator.metadata", true, "three levels deep hydrator.metadata"},
|
||||
{"path1/nested/deep/README.md", true, "three levels deep README.md"},
|
||||
{"manifest.yaml", false, "manifest.yaml should not match"},
|
||||
{"path1/manifest.yaml", false, "nested manifest.yaml should not match"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// Use git check-attr to verify if linguist-generated attribute is set
|
||||
cmd := exec.CommandContext(ctx, "git", "check-attr", "linguist-generated", tc.path)
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "Failed to run git check-attr: %s", string(output))
|
||||
|
||||
// Output format: <path>: <attribute>: <value>
|
||||
// Example: "hydrator.metadata: linguist-generated: true"
|
||||
outputStr := strings.TrimSpace(string(output))
|
||||
|
||||
if tc.shouldMatch {
|
||||
expectedOutput := tc.path + ": linguist-generated: true"
|
||||
assert.Equal(t, expectedOutput, outputStr,
|
||||
"File %s should have linguist-generated=true attribute", tc.path)
|
||||
} else {
|
||||
// Attribute should be unspecified
|
||||
expectedOutput := tc.path + ": linguist-generated: unspecified"
|
||||
assert.Equal(t, expectedOutput, outputStr,
|
||||
"File %s should not have linguist-generated=true attribute", tc.path)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1508,8 +1508,18 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
// if we just completed an operation, force a refresh so that UI will report up-to-date
|
||||
// sync/health information
|
||||
if _, err := cache.MetaNamespaceKeyFunc(app); err == nil {
|
||||
// force app refresh with using CompareWithLatest comparison type and trigger app reconciliation loop
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatestForceResolve.Pointer(), nil)
|
||||
var compareWith CompareWith
|
||||
if state.Operation.InitiatedBy.Automated {
|
||||
// Do not force revision resolution on automated operations because
|
||||
// this would cause excessive Ls-Remote requests on monorepo commits
|
||||
compareWith = CompareWithLatest
|
||||
} else {
|
||||
// Force app refresh with using most recent resolved revision after sync,
|
||||
// so UI won't show a just synced application being out of sync if it was
|
||||
// synced after commit but before app. refresh (see #18153)
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
}
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), compareWith.Pointer(), nil)
|
||||
} else {
|
||||
logCtx.Warnf("Fails to requeue application: %v", err)
|
||||
}
|
||||
|
||||
@@ -2321,6 +2321,41 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
|
||||
assert.Equal(t, CompareWithLatestForceResolve, level)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppAutomatedOperation_Successful(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "default"
|
||||
app.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
InitiatedBy: v1alpha1.OperationInitiator{
|
||||
Automated: true,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponses: []*apiclient.ManifestResponse{{
|
||||
Manifests: []string{},
|
||||
}},
|
||||
}, nil)
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
receivedPatch := map[string]any{}
|
||||
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
|
||||
assert.Equal(t, "successfully synced (no more tasks)", message)
|
||||
ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, CompareWithLatest, level)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -260,7 +260,7 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
|
||||
Revision: revision,
|
||||
SyncedRevision: syncedRevision,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
Paths: path.GetAppRefreshPaths(app),
|
||||
Paths: path.GetSourceRefreshPaths(app, source),
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: appNamespace,
|
||||
|
||||
@@ -18,9 +18,11 @@ These are the upcoming releases dates:
|
||||
| v2.13 | Monday, Sep. 16, 2024 | Monday, Nov. 4, 2024 | [Regina Voloshin](https://github.com/reggie-k) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/19513) |
|
||||
| v2.14 | Monday, Dec. 16, 2024 | Monday, Feb. 3, 2025 | [Ryan Umstead](https://github.com/rumstead) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/20869) |
|
||||
| v3.0 | Monday, Mar. 17, 2025 | Tuesday, May 6, 2025 | [Regina Voloshin](https://github.com/reggie-k) | | [checklist](https://github.com/argoproj/argo-cd/issues/21735) |
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](#) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | | [checklist](#) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | | |
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](https://github.com/argoproj/argo-cd/issues/23347) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | [Michael Crenshaw](https://github.com/crenshaw-dev) | [checklist](https://github.com/argoproj/argo-cd/issues/24539) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | [Peter Jiang](https://github.com/pjiang-dev) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/25211) |
|
||||
| v3.4 | Monday, Mar. 16, 2026 | Monday, May. 4, 2026 | | |
|
||||
| v3.5 | Monday, Jun. 15, 2026 | Monday, Aug. 3, 2026 | | |
|
||||
|
||||
Actual release dates might differ from the plan by a few days.
|
||||
|
||||
|
||||
@@ -11,6 +11,10 @@ The notification service is used to push events to [Alertmanager](https://github
|
||||
* `basicAuth` - optional, server auth
|
||||
* `bearerToken` - optional, server auth
|
||||
* `timeout` - optional, the timeout in seconds used when sending alerts, default is "3 seconds"
|
||||
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
|
||||
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
|
||||
* `maxConnsPerHost` - optional, maximum total connections per host.
|
||||
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing.
|
||||
|
||||
`basicAuth` or `bearerToken` is used for authentication, you can choose one. If the two are set at the same time, `basicAuth` takes precedence over `bearerToken`.
|
||||
|
||||
|
||||
@@ -12,6 +12,19 @@ The Email notification service sends email notifications using SMTP protocol and
|
||||
* `html` - optional bool, true or false
|
||||
* `insecure_skip_verify` - optional bool, true or false
|
||||
|
||||
### Using Gmail
|
||||
|
||||
When configuring Gmail as the SMTP service:
|
||||
|
||||
* `username` - Must be your Gmail address.
|
||||
* `password` - Use an App Password, not your regular Gmail password.
|
||||
|
||||
To Generate an app password, follow this link https://myaccount.google.com/apppasswords
|
||||
|
||||
!!! note
|
||||
This applies to personal Gmail accounts (non-Google Workspace). For Google Workspace users, SMTP settings
|
||||
and authentication methods may differ.
|
||||
|
||||
## Example
|
||||
|
||||
The following snippet contains sample Gmail service configuration:
|
||||
@@ -23,11 +36,11 @@ metadata:
|
||||
name: argocd-notifications-cm
|
||||
data:
|
||||
service.email.gmail: |
|
||||
username: $email-username
|
||||
password: $email-password
|
||||
username: $username
|
||||
password: $password
|
||||
host: smtp.gmail.com
|
||||
port: 465
|
||||
from: $email-username
|
||||
from: $email-address
|
||||
```
|
||||
|
||||
Without authentication:
|
||||
@@ -41,7 +54,7 @@ data:
|
||||
service.email.example: |
|
||||
host: smtp.example.com
|
||||
port: 587
|
||||
from: $email-username
|
||||
from: $email-address
|
||||
```
|
||||
|
||||
## Template
|
||||
|
||||
@@ -8,6 +8,10 @@ The GitHub notification service changes commit status using [GitHub Apps](https:
|
||||
- `installationID` - the app installation id
|
||||
- `privateKey` - the app private key
|
||||
- `enterpriseBaseURL` - optional URL, e.g. https://git.example.com/api/v3
|
||||
- `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
|
||||
- `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
|
||||
- `maxConnsPerHost` - optional, maximum total connections per host.
|
||||
- `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing.
|
||||
|
||||
> ⚠️ _NOTE:_ Specifying `/api/v3` in the `enterpriseBaseURL` is required until [argoproj/notifications-engine#205](https://github.com/argoproj/notifications-engine/issues/205) is resolved.
|
||||
|
||||
|
||||
@@ -9,6 +9,10 @@ Available parameters :
|
||||
* `apiURL` - the server url, e.g. https://grafana.example.com
|
||||
* `apiKey` - the API key for the serviceaccount
|
||||
* `insecureSkipVerify` - optional bool, true or false
|
||||
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
|
||||
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
|
||||
* `maxConnsPerHost` - optional, maximum total connections per host.
|
||||
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing.
|
||||
|
||||
1. Login to your Grafana instance as `admin`
|
||||
2. On the left menu, go to Configuration / API Keys
|
||||
|
||||
@@ -5,6 +5,10 @@
|
||||
* `apiURL` - the server url, e.g. https://mattermost.example.com
|
||||
* `token` - the bot token
|
||||
* `insecureSkipVerify` - optional bool, true or false
|
||||
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
|
||||
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
|
||||
* `maxConnsPerHost` - optional, maximum total connections per host.
|
||||
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing, e.g. '90s'.
|
||||
|
||||
## Configuration
|
||||
|
||||
|
||||
@@ -4,6 +4,10 @@
|
||||
|
||||
* `apiURL` - the api server url, e.g. https://api.newrelic.com
|
||||
* `apiKey` - a [NewRelic ApiKey](https://docs.newrelic.com/docs/apis/rest-api-v2/get-started/introduction-new-relic-rest-api-v2/#api_key)
|
||||
* `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
|
||||
* `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
|
||||
* `maxConnsPerHost` - optional, maximum total connections per host.
|
||||
* `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing, e.g. '90s'.
|
||||
|
||||
## Configuration
|
||||
|
||||
|
||||
@@ -47,7 +47,8 @@ metadata:
|
||||
* [Grafana](./grafana.md)
|
||||
* [Webhook](./webhook.md)
|
||||
* [Telegram](./telegram.md)
|
||||
* [Teams](./teams.md)
|
||||
* [Teams (Office 365 Connectors)](./teams.md) - Legacy service (deprecated, retires March 31, 2026)
|
||||
* [Teams Workflows](./teams-workflows.md) - Recommended replacement for Office 365 Connectors
|
||||
* [Google Chat](./googlechat.md)
|
||||
* [Rocket.Chat](./rocketchat.md)
|
||||
* [Pushover](./pushover.md)
|
||||
|
||||
@@ -62,6 +62,8 @@ The parameters for the PagerDuty configuration in the template generally match w
|
||||
* `group` - Logical grouping of components of a service.
|
||||
* `class` - The class/type of the event.
|
||||
* `url` - The URL that should be used for the link "View in ArgoCD" in PagerDuty.
|
||||
* `dedupKey` - A string used by PagerDuty to deduplicate and correlate events. Events with the same `dedupKey` will be grouped into the same incident. If omitted, PagerDuty will create a new incident for each event.
|
||||
|
||||
|
||||
The `timestamp` and `custom_details` parameters are not currently supported.
|
||||
|
||||
|
||||
@@ -16,6 +16,11 @@ The Slack notification service configuration includes following settings:
|
||||
| `token` | **True** | `string` | The app's OAuth access token. | `xoxb-1234567890-1234567890123-5n38u5ed63fgzqlvuyxvxcx6` |
|
||||
| `username` | False | `string` | The app username. | `argocd` |
|
||||
| `disableUnfurl` | False | `bool` | Disable slack unfurling links in messages | `true` |
|
||||
| `maxIdleConns` | False | `int` | Maximum number of idle (keep-alive) connections across all hosts. | — |
|
||||
| `maxIdleConnsPerHost` | False | `int` | Maximum number of idle (keep-alive) connections per host. | — |
|
||||
| `maxConnsPerHost` | False | `int` | Maximum total connections per host. | — |
|
||||
| `idleConnTimeout` | False | `string` | Maximum amount of time an idle (keep-alive) connection will remain open before closing (e.g., `90s`). | — |
|
||||
|
||||
|
||||
## Configuration
|
||||
|
||||
|
||||
370
docs/operator-manual/notifications/services/teams-workflows.md
Executable file
370
docs/operator-manual/notifications/services/teams-workflows.md
Executable file
@@ -0,0 +1,370 @@
|
||||
# Teams Workflows
|
||||
|
||||
## Overview
|
||||
|
||||
The Teams Workflows notification service sends message notifications using Microsoft Teams Workflows (Power Automate). This is the recommended replacement for the legacy Office 365 Connectors service, which will be retired on March 31, 2026.
|
||||
|
||||
## Parameters
|
||||
|
||||
The Teams Workflows notification service requires specifying the following settings:
|
||||
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://api.powerautomate.com/webhook/...`
|
||||
|
||||
## Supported Webhook URL Formats
|
||||
|
||||
The service supports the following Microsoft Teams Workflows webhook URL patterns:
|
||||
|
||||
- `https://api.powerautomate.com/...`
|
||||
- `https://api.powerplatform.com/...`
|
||||
- `https://flow.microsoft.com/...`
|
||||
- URLs containing `/powerautomate/` in the path
|
||||
|
||||
## Configuration
|
||||
|
||||
1. Open `Teams` and go to the channel you wish to set notifications for
|
||||
2. Click on the 3 dots next to the channel name
|
||||
3. Select `Workflows`
|
||||
4. Click on `Manage`
|
||||
5. Click `New flow`
|
||||
6. Write `Send webhook alerts to a channel` in the search bar or select it from the template list
|
||||
7. Choose your team and channel
|
||||
8. Configure the webhook name and settings
|
||||
9. Copy the webhook URL (it will be from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`)
|
||||
10. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
data:
|
||||
service.teams-workflows: |
|
||||
recipientUrls:
|
||||
channelName: $channel-workflows-url
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
channel-workflows-url: https://api.powerautomate.com/webhook/your-webhook-id
|
||||
```
|
||||
|
||||
11. Create subscription for your Teams Workflows integration:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
|
||||
```
|
||||
|
||||
## Channel Support
|
||||
|
||||
- ✅ Standard Teams channels
|
||||
- ✅ Shared channels (as of December 2025)
|
||||
- ✅ Private channels (as of December 2025)
|
||||
|
||||
Teams Workflows provides enhanced channel support compared to Office 365 Connectors, allowing you to post to shared and private channels in addition to standard channels.
|
||||
|
||||
## Adaptive Card Format
|
||||
|
||||
The Teams Workflows service uses **Adaptive Cards** exclusively, which is the modern, flexible card format for Microsoft Teams. All notifications are automatically converted to Adaptive Card format and wrapped in the required message envelope.
|
||||
|
||||
### Option 1: Using Template Fields (Recommended)
|
||||
|
||||
The service automatically converts template fields to Adaptive Card format. This is the simplest and most maintainable approach:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams-workflows:
|
||||
# ThemeColor supports Adaptive Card semantic colors: "Good", "Warning", "Attention", "Accent"
|
||||
# or hex colors like "#000080"
|
||||
themeColor: "Good"
|
||||
title: Application {{.app.metadata.name}} has been successfully synced
|
||||
text: Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.
|
||||
summary: "{{.app.metadata.name}} sync succeeded"
|
||||
facts: |
|
||||
[{
|
||||
"name": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
}, {
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}]
|
||||
sections: |
|
||||
[{
|
||||
"facts": [
|
||||
{
|
||||
"name": "Namespace",
|
||||
"value": "{{.app.metadata.namespace}}"
|
||||
},
|
||||
{
|
||||
"name": "Cluster",
|
||||
"value": "{{.app.spec.destination.server}}"
|
||||
}
|
||||
]
|
||||
}]
|
||||
potentialAction: |-
|
||||
[{
|
||||
"@type": "OpenUri",
|
||||
"name": "View in Argo CD",
|
||||
"targets": [{
|
||||
"os": "default",
|
||||
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
- `title` → Converted to a large, bold TextBlock
|
||||
- `text` → Converted to a regular TextBlock
|
||||
- `facts` → Converted to a FactSet element
|
||||
- `sections` → Facts within sections are extracted and converted to FactSet elements
|
||||
- `potentialAction` → OpenUri actions are converted to Action.OpenUrl
|
||||
- `themeColor` → Applied to the title TextBlock (supports semantic colors like "Good", "Warning", "Attention", "Accent" or hex colors)
|
||||
|
||||
### Option 2: Custom Adaptive Card JSON
|
||||
|
||||
For full control and advanced features, you can provide a complete Adaptive Card JSON template:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams-workflows:
|
||||
adaptiveCard: |
|
||||
{
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.4",
|
||||
"body": [
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}} synced successfully",
|
||||
"size": "Large",
|
||||
"weight": "Bolder",
|
||||
"color": "Good"
|
||||
},
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.",
|
||||
"wrap": true
|
||||
},
|
||||
{
|
||||
"type": "FactSet",
|
||||
"facts": [
|
||||
{
|
||||
"title": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "Action.OpenUrl",
|
||||
"title": "View in Argo CD",
|
||||
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** When using `adaptiveCard`, you only need to provide the AdaptiveCard JSON structure (not the full message envelope). The service automatically wraps it in the required `message` + `attachments` format for Teams Workflows.
|
||||
|
||||
**Important:** If you provide `adaptiveCard`, it takes precedence over all other template fields (`title`, `text`, `facts`, etc.).
|
||||
|
||||
## Template Fields
|
||||
|
||||
The Teams Workflows service supports the following template fields, which are automatically converted to Adaptive Card format:
|
||||
|
||||
### Standard Fields
|
||||
|
||||
- `title` - Message title (converted to large, bold TextBlock)
|
||||
- `text` - Message text content (converted to TextBlock)
|
||||
- `summary` - Summary text (currently not used in Adaptive Cards, but preserved for compatibility)
|
||||
- `themeColor` - Color for the title. Supports:
|
||||
- Semantic colors: `"Good"` (green), `"Warning"` (yellow), `"Attention"` (red), `"Accent"` (blue)
|
||||
- Hex colors: `"#000080"`, `"#FF0000"`, etc.
|
||||
- `facts` - JSON array of fact key-value pairs (converted to FactSet)
|
||||
```yaml
|
||||
facts: |
|
||||
[{
|
||||
"name": "Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
}]
|
||||
```
|
||||
- `sections` - JSON array of sections containing facts (facts are extracted and converted to FactSet)
|
||||
```yaml
|
||||
sections: |
|
||||
[{
|
||||
"facts": [{
|
||||
"name": "Namespace",
|
||||
"value": "{{.app.metadata.namespace}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
- `potentialAction` - JSON array of action buttons (OpenUri actions converted to Action.OpenUrl)
|
||||
```yaml
|
||||
potentialAction: |-
|
||||
[{
|
||||
"@type": "OpenUri",
|
||||
"name": "View Details",
|
||||
"targets": [{
|
||||
"os": "default",
|
||||
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
### Advanced Fields
|
||||
|
||||
- `adaptiveCard` - Complete Adaptive Card JSON template (takes precedence over all other fields)
|
||||
- Only provide the AdaptiveCard structure, not the message envelope
|
||||
- Supports full Adaptive Card 1.4 specification
|
||||
- Allows access to all Adaptive Card features (containers, columns, images, etc.)
|
||||
|
||||
- `template` - Raw JSON template (legacy, use `adaptiveCard` instead)
|
||||
|
||||
### Field Conversion Details
|
||||
|
||||
| Template Field | Adaptive Card Element | Notes |
|
||||
|---------------|----------------------|-------|
|
||||
| `title` | `TextBlock` with `size: "Large"`, `weight: "Bolder"` | ThemeColor applied to this element |
|
||||
| `text` | `TextBlock` with `wrap: true` | Falls back to the notification message if `text` is empty |
|
||||
| `facts` | `FactSet` | Each fact becomes a `title`/`value` pair |
|
||||
| `sections[].facts` | `FactSet` | Facts extracted from sections |
|
||||
| `potentialAction[OpenUri]` | `Action.OpenUrl` | Only OpenUri actions are converted |
|
||||
| `themeColor` | Applied to title `TextBlock.color` | Supports semantic and hex colors |
|
||||
|
||||
## Migration from Office 365 Connectors
|
||||
|
||||
If you're currently using the `teams` service with Office 365 Connectors, follow these steps to migrate:
|
||||
|
||||
1. **Create a new Workflows webhook** using the configuration steps above
|
||||
|
||||
2. **Update your service configuration:**
|
||||
- Change from `service.teams` to `service.teams-workflows`
|
||||
- Update the webhook URL to your new Workflows webhook URL
|
||||
|
||||
3. **Update your templates:**
|
||||
- Change `teams:` to `teams-workflows:` in your templates
|
||||
- Your existing template fields (`title`, `text`, `facts`, `sections`, `potentialAction`) will automatically be converted to Adaptive Card format
|
||||
- No changes needed to your template structure - the conversion is automatic
|
||||
|
||||
4. **Update your subscriptions:**
|
||||
```yaml
|
||||
# Old
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
|
||||
|
||||
# New
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
|
||||
```
|
||||
|
||||
5. **Test and verify:**
|
||||
- Send a test notification to verify it works correctly
|
||||
- Once verified, you can remove the old Office 365 Connector configuration
|
||||
|
||||
**Note:** Your existing templates will work without modification. The service automatically converts your template fields to Adaptive Card format, so you get the benefits of modern cards without changing your templates.
|
||||
|
||||
## Differences from Office 365 Connectors
|
||||
|
||||
| Feature | Office 365 Connectors | Teams Workflows |
|
||||
|---------|----------------------|-----------------|
|
||||
| Service Name | `teams` | `teams-workflows` |
|
||||
| Standard Channels | ✅ | ✅ |
|
||||
| Shared Channels | ❌ | ✅ (Dec 2025+) |
|
||||
| Private Channels | ❌ | ✅ (Dec 2025+) |
|
||||
| Card Format | messageCard (legacy) | Adaptive Cards (modern) |
|
||||
| Template Conversion | N/A | Automatic conversion from template fields |
|
||||
| Retirement Date | March 31, 2026 | Active |
|
||||
|
||||
## Adaptive Card Features
|
||||
|
||||
The Teams Workflows service leverages Adaptive Cards, which provide:
|
||||
|
||||
- **Rich Content**: Support for text, images, fact sets, and more
|
||||
- **Flexible Layout**: Containers, columns, and adaptive layouts
|
||||
- **Interactive Elements**: Action buttons, input fields, and more
|
||||
- **Semantic Colors**: Built-in color schemes (Good, Warning, Attention, Accent)
|
||||
- **Cross-Platform**: Works across Teams, Outlook, and other Microsoft 365 apps
|
||||
|
||||
### Example: Advanced Adaptive Card Template
|
||||
|
||||
For complex notifications, you can use the full Adaptive Card specification:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded-advanced: |
|
||||
teams-workflows:
|
||||
adaptiveCard: |
|
||||
{
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.4",
|
||||
"body": [
|
||||
{
|
||||
"type": "Container",
|
||||
"items": [
|
||||
{
|
||||
"type": "ColumnSet",
|
||||
"columns": [
|
||||
{
|
||||
"type": "Column",
|
||||
"width": "auto",
|
||||
"items": [
|
||||
{
|
||||
"type": "Image",
|
||||
"url": "https://example.com/success-icon.png",
|
||||
"size": "Small"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "Column",
|
||||
"width": "stretch",
|
||||
"items": [
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}}",
|
||||
"weight": "Bolder",
|
||||
"size": "Large"
|
||||
},
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Successfully synced",
|
||||
"spacing": "None",
|
||||
"isSubtle": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "FactSet",
|
||||
"facts": [
|
||||
{
|
||||
"title": "Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "Action.OpenUrl",
|
||||
"title": "View in Argo CD",
|
||||
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -1,18 +1,46 @@
|
||||
# Teams
|
||||
# Teams (Office 365 Connectors)
|
||||
|
||||
## ⚠️ Deprecation Notice
|
||||
|
||||
**Office 365 Connectors are being retired by Microsoft.**
|
||||
|
||||
Microsoft is retiring the Office 365 Connectors service in Teams. The service will be fully retired by **March 31, 2026** (extended from the original timeline of December 2025).
|
||||
|
||||
### What this means:
|
||||
- **Old Office 365 Connectors** (webhook URLs from `webhook.office.com`) will stop working after the retirement date
|
||||
- **New Power Automate Workflows** (webhook URLs from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`) are the recommended replacement
|
||||
|
||||
### Migration Required:
|
||||
If you are currently using Office 365 Connectors (Incoming Webhook), you should migrate to Power Automate Workflows before the retirement date. Use the dedicated [Teams Workflows service](./teams-workflows.md) (`teams-workflows`) for Power Automate webhook URLs, and plan your migration accordingly.
|
||||
|
||||
**Migration Resources:**
|
||||
- [Microsoft Deprecation Notice](https://devblogs.microsoft.com/microsoft365dev/retirement-of-office-365-connectors-within-microsoft-teams/)
|
||||
- [Create incoming webhooks with Workflows for Microsoft Teams](https://support.microsoft.com/en-us/office/create-incoming-webhooks-with-workflows-for-microsoft-teams-8ae491c7-0394-4861-ba59-055e33f75498)
|
||||
|
||||
---
|
||||
|
||||
## Parameters
|
||||
|
||||
The Teams notification service send message notifications using Teams bot and requires specifying the following settings:
|
||||
The Teams notification service sends message notifications using Office 365 Connectors and requires specifying the following settings:
|
||||
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://example.com`
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://outlook.office.com/webhook/...`
|
||||
|
||||
> **⚠️ Deprecation Notice:** Office 365 Connectors will be retired by Microsoft on **March 31, 2026**. We recommend migrating to the [Teams Workflows service](./teams-workflows.md) for continued support and enhanced features.
|
||||
|
||||
## Configuration
|
||||
|
||||
> **💡 For Power Automate Workflows (Recommended):** See the [Teams Workflows documentation](./teams-workflows.md) for detailed configuration instructions.
|
||||
|
||||
### Office 365 Connectors (Deprecated - Retiring March 31, 2026)
|
||||
|
||||
> **⚠️ Warning:** This method is deprecated and will stop working after March 31, 2026. Please migrate to Power Automate Workflows.
|
||||
|
||||
1. Open `Teams` and go to `Apps`
|
||||
2. Find `Incoming Webhook` microsoft app and click on it
|
||||
3. Press `Add to a team` -> select team and channel -> press `Set up a connector`
|
||||
4. Enter webhook name and upload image (optional)
|
||||
5. Press `Create` then copy webhook url and store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
5. Press `Create` then copy webhook url (it will be from `webhook.office.com`)
|
||||
6. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -31,10 +59,20 @@ kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
channel-teams-url: https://example.com
|
||||
channel-teams-url: https://webhook.office.com/webhook/your-webhook-id # Office 365 Connector (deprecated)
|
||||
```
|
||||
|
||||
6. Create subscription for your Teams integration:
|
||||
> **Note:** For Power Automate Workflows webhooks, use the [Teams Workflows service](./teams-workflows.md) instead.
|
||||
|
||||
### Webhook Type Detection
|
||||
|
||||
The `teams` service supports Office 365 Connectors (deprecated):
|
||||
|
||||
- **Office 365 Connectors**: URLs from `webhook.office.com` (deprecated)
|
||||
- Requires response body to be exactly `"1"` for success
|
||||
- Will stop working after March 31, 2026
|
||||
|
||||
7. Create subscription for your Teams integration:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
@@ -44,12 +82,20 @@ metadata:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
|
||||
```
|
||||
|
||||
## Channel Support
|
||||
|
||||
- ✅ Standard Teams channels only
|
||||
|
||||
> **Note:** Office 365 Connectors only support standard Teams channels. For shared channels or private channels, use the [Teams Workflows service](./teams-workflows.md).
|
||||
|
||||
## Templates
|
||||
|
||||

|
||||
|
||||
[Notification templates](../templates.md) can be customized to leverage teams message sections, facts, themeColor, summary and potentialAction [feature](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using).
|
||||
|
||||
The Teams service uses the **messageCard** format (MessageCard schema) which is compatible with Office 365 Connectors.
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams:
|
||||
@@ -124,3 +170,7 @@ template.app-sync-succeeded: |
|
||||
teams:
|
||||
summary: "Sync Succeeded"
|
||||
```
|
||||
|
||||
## Migration to Teams Workflows
|
||||
|
||||
If you're currently using Office 365 Connectors, see the [Teams Workflows documentation](./teams-workflows.md) for migration instructions and enhanced features.
|
||||
|
||||
@@ -14,6 +14,10 @@ The Webhook notification service configuration includes following settings:
|
||||
- `retryWaitMin` - Optional, the minimum wait time between retries. Default value: 1s.
|
||||
- `retryWaitMax` - Optional, the maximum wait time between retries. Default value: 5s.
|
||||
- `retryMax` - Optional, the maximum number of retries. Default value: 3.
|
||||
- `maxIdleConns` - optional, maximum number of idle (keep-alive) connections across all hosts.
|
||||
- `maxIdleConnsPerHost` - optional, maximum number of idle (keep-alive) connections per host.
|
||||
- `maxConnsPerHost` - optional, maximum total connections per host.
|
||||
- `idleConnTimeout` - optional, maximum amount of time an idle (keep-alive) connection will remain open before closing, e.g. '90s'.
|
||||
|
||||
## Retry Behavior
|
||||
|
||||
@@ -74,6 +78,29 @@ metadata:
|
||||
notifications.argoproj.io/subscribe.<trigger-name>.<webhook-name>: ""
|
||||
```
|
||||
|
||||
4. TLS configuration (optional)
|
||||
|
||||
If your webhook server uses a custom TLS certificate, you can configure the notification service to trust it by adding the certificate to the `argocd-tls-certs-cm` ConfigMap as shown below:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-tls-certs-cm
|
||||
data:
|
||||
<hostname>: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
<TLS DATA>
|
||||
-----END CERTIFICATE-----
|
||||
```
|
||||
|
||||
*NOTE:*
|
||||
*If the custom certificate is not trusted, you may encounter errors such as:*
|
||||
```
|
||||
Put \"https://...\": x509: certificate signed by unknown authority
|
||||
```
|
||||
*Adding the server's certificate to `argocd-tls-certs-cm` resolves this issue.*
|
||||
|
||||
## Examples
|
||||
|
||||
### Set GitHub commit status
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 3.2 | v1.33, v1.32, v1.31, v1.30 |
|
||||
| 3.1 | v1.33, v1.32, v1.31, v1.30 |
|
||||
| 3.2 | v1.34, v1.33, v1.32, v1.31 |
|
||||
| 3.1 | v1.34, v1.33, v1.32, v1.31 |
|
||||
| 3.0 | v1.32, v1.31, v1.30, v1.29 |
|
||||
|
||||
@@ -55,4 +55,16 @@ spec:
|
||||
+ protocol: UDP
|
||||
+ - port: 53
|
||||
+ protocol: TCP
|
||||
```
|
||||
```
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [route53.aws.crossplane.io/ResourceRecordSet](https://github.com/argoproj/argo-cd/commit/666499f6108124ef7bfa0c6cc616770c6dc4f42c)
|
||||
* [cloudfront.aws.crossplane.io/Distribution](https://github.com/argoproj/argo-cd/commit/21c384f42354ada2b94c18773104527eb27f86bc)
|
||||
* [beat.k8s.elastic.co/Beat](https://github.com/argoproj/argo-cd/commit/5100726fd61617a0001a27233cfe8ac4354bdbed)
|
||||
* [apps.kruise.io/AdvancedCronjob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/BroadcastJob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/CloneSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/DaemonSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/StatefulSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [rollouts.kruise.io/Rollout](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
@@ -57,3 +57,24 @@ The affected ApplicationSet fields are the following (jq selector syntax):
|
||||
* `.spec.generators[].clusterDecisionResource.labelSelector`
|
||||
* `.spec.generators[].matrix.generators[].selector`
|
||||
* `.spec.generators[].merge.generators[].selector`
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [core.humio.com/HumioAction](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioAlert](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioCluster](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioIngestToken](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioParser](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioRepository](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioView](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [k8s.mariadb.com/Backup](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/Database](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/Grant](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/MariaDB](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/SqlJob](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/User](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [kafka.strimzi.io/KafkaBridge](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
|
||||
* [kafka.strimzi.io/KafkaConnector](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
|
||||
* [keda.sh/ScaledObject](https://github.com/argoproj/argo-cd/commit/9bc9ff9c7a3573742a767c38679cbefb4f07c1c0)
|
||||
* [openfaas.com/Function](https://github.com/argoproj/argo-cd/commit/2a05ae02ab90ae06fefa97ed6b9310590d317783)
|
||||
* [camel.apache.org/Integration](https://github.com/argoproj/argo-cd/commit/1e2f5987d25307581cd56b8fe9d329633e0f704f)
|
||||
@@ -68,6 +68,41 @@ The default extension for log files generated by Argo CD when using the "Downloa
|
||||
- Consistency with standard log file conventions.
|
||||
|
||||
If you have any custom scripts or tools that depend on the `.txt` extension, please update them accordingly.
|
||||
|
||||
## Added proxy to kustomize
|
||||
|
||||
Proxy config set on repository credentials / repository templates is now passed down to the `kustomize build` command.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [controlplane.cluster.x-k8s.io/AWSManagedControlPlane](https://github.com/argoproj/argo-cd/commit/f1105705126153674c79f69b5d9c9647360d16f5)
|
||||
* [policy.open-cluster-management.io/CertificatePolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/ConfigurationPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/OperatorPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/Policy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [PodDisruptionBudget](https://github.com/argoproj/argo-cd/commit/e86258d8a5049260b841abc0ef1fd7f7a4b7cd45)
|
||||
* [cluster.x-k8s.io/MachinePool](https://github.com/argoproj/argo-cd/commit/59e00911304288b4f96889bf669b6ed2aecdf31b)
|
||||
* [lifecycle.keptn.sh/KeptnWorkloadVersion](https://github.com/argoproj/argo-cd/commit/ddc0b0fd3fa7e0b53170582846b20be23c301185)
|
||||
* [numaplane.numaproj.io/ISBServiceRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [numaplane.numaproj.io/NumaflowControllerRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [numaplane.numaproj.io/PipelineRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [rds.aws.crossplane.io/DBCluster](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
|
||||
* [rds.aws.crossplane.io/DBInstance](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
|
||||
* [iam.aws.crossplane.io/Policy](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [iam.aws.crossplane.io/RolePolicyAttachment](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [iam.aws.crossplane.io/Role](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [s3.aws.crossplane.io/Bucket](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [metrics.keptn.sh/KeptnMetric](https://github.com/argoproj/argo-cd/commit/326cc4a06b2cb5ac99797d3f04c2d4c48b8692e2)
|
||||
* [metrics.keptn.sh/Analysis](https://github.com/argoproj/argo-cd/commit/e26c105e527ed262cc5dc838a793841017ba316a)
|
||||
* [numaplane.numaproj.io/MonoVertexRollout](https://github.com/argoproj/argo-cd/commit/32ee00f1f494f69cc84d1881dda70ce514e1f737)
|
||||
* [helm.toolkit.fluxcd.io/HelmRelease](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImagePolicy](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImageRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImageUpdateAutomation](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [kustomize.toolkit.fluxcd.io/Kustomization](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [notification.toolkit.fluxcd.io/Receiver](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/Bucket](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/GitRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/HelmChart](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/HelmRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/OCIRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
|
||||
@@ -20,3 +20,27 @@ the [CLI and Application CR](https://argo-cd.readthedocs.io/en/latest/user-guide
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [platform.confluent.io/Connector](https://github.com/argoproj/argo-cd/commit/99efafb55a553a9ab962d56c20dab54ba65b7ae0)
|
||||
* [addons.cluster.x-k8s.io/ClusterResourceSet](https://github.com/argoproj/argo-cd/commit/fdf539dc6a027ef975fde23bf734f880570ccdc3)
|
||||
* [numaflow.numaproj.io/InterStepBufferService](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/MonoVertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/Pipeline](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/Vertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [acid.zalan.do/Postgresql](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [grafana.integreatly.org/Grafana](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [grafana.integreatly.org/GrafanaDatasource](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [k8s.keycloak.org/Keycloak](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [solr.apache.org/SolrCloud](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [gateway.solo.io/Gateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/MatchableHttpGateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/RouteOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/RouteTable](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/VirtualHostOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/VirtualService](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Proxy](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Settings](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Upstream](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/UpstreamGroup](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
@@ -501,3 +501,7 @@ More details for ignored resource updates in the [Diffing customization](../../u
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* No new added health checks
|
||||
@@ -63,3 +63,25 @@ to the [release notes](https://github.com/kubernetes-sigs/kustomize/releases/tag
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [core.spinkube.dev/SpinApp](https://github.com/argoproj/argo-cd/commit/7d6604404fd3b7d77124f9623a2d7a12cc24a0bb)
|
||||
* [opentelemetry.io/OpenTelemetryCollector](https://github.com/argoproj/argo-cd/commit/65464d8b77941c65499028bb14172fc40e62e38b)
|
||||
* [logstash.k8s.elastic.co/Logstash](https://github.com/argoproj/argo-cd/commit/8f1f5c7234e694a4830744f92e1b0f8d1e3cd43d)
|
||||
* [kyverno.io/Policy](https://github.com/argoproj/argo-cd/commit/e578b85410f748c6c7b4e10ff1a5fdbca09b3328)
|
||||
* [projectcontour.io/HTTPProxy](https://github.com/argoproj/argo-cd/commit/ce4b7a28cc77959fab5b6fedd14b1f9e9a4af4f7)
|
||||
* [grafana.integreatly.org/GrafanaDashboard](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
|
||||
* [grafana.integreatly.org/GrafanaFolder](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
|
||||
* [postgresql.cnpg.io/Cluster](https://github.com/argoproj/argo-cd/commit/f4edcf7717940e44a141dadb5ca8c5fc11951cb2)
|
||||
* [gateway.networking.k8s.io/GRPCRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [gateway.networking.k8s.io/Gateway](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [gateway.networking.k8s.io/HTTPRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [rabbitmq.com/Binding](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Exchange](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Permission](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Policy](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Queue](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Shovel](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/User](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Vhost](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
### Hydration paths must now be non-root
|
||||
|
||||
Source hydration now requires that every application specify a non-root path.
|
||||
Source hydration (with [Source Hydrator](../../../user-guide/source-hydrator/)) now requires that every application specify a non-root path.
|
||||
Using the repository root (for example, "" or ".") is no longer supported. This change ensures
|
||||
that hydration outputs are isolated to a dedicated subdirectory and prevents accidental overwrites
|
||||
or deletions of important files stored at the root, such as CI pipelines, documentation, or configuration files.
|
||||
@@ -92,3 +92,30 @@ credentials of project-scoped repositories and clusters.
|
||||
The `resources` field of the `status` resource of an ApplicationSet is now limited to 5000 elements by default. This is
|
||||
to prevent status bloat and exceeding etcd limits. The limit can be configured by setting the `applicationsetcontroller.status.max.resources.count`
|
||||
field in the `argocd-cmd-params-cm` ConfigMap.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [datadoghq.com/DatadogMetric](https://github.com/argoproj/argo-cd/commit/5c9a5ef9a65f8e04e729fbae54a9310c0a42f6c2)
|
||||
* [CronJob](https://github.com/argoproj/argo-cd/commit/d3de4435ce86f3f85a4cc58978b2544af2ac4248)
|
||||
* [promoter.argoproj.io/ArgoCDCommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/ChangeTransferPolicy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/CommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/PromotionStrategy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/PullRequest](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [coralogix.com/Alert](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
|
||||
* [coralogix.com/RecordingRuleGroupSet](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
|
||||
* [projectcontour.io/ExtensionService](https://github.com/argoproj/argo-cd/commit/4e63bc756394d93c684b6b8e8b3856e0e6b3f199)
|
||||
* [clickhouse-keeper.altinity.com/ClickHouseKeeperInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
|
||||
* [clickhouse.altinity.com/ClickHouseInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
|
||||
* [apps.3scale.net/APIManager](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ActiveDoc](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ApplicationAuth](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Application](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Backend](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/CustomPolicyDefinition](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/DeveloperAccount](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/DeveloperUser](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/OpenAPI](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Product](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ProxyConfigPromote](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Tenant](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
@@ -17,11 +17,12 @@ Adding the argocd.argoproj.io/hook annotation to a resource will assign it to a
|
||||
|
||||
## How phases work?
|
||||
|
||||
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following.
|
||||
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following:
|
||||
|
||||
1. Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
|
||||
2. Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
|
||||
3. Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
|
||||
|
||||
Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
|
||||
Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
|
||||
Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
|
||||
Hooks marked with Skip will not be applied.
|
||||
|
||||
Here is a graphical overview of the sync process:
|
||||
@@ -54,8 +55,9 @@ Argo CD also offers an alternative method of changing the sync order of resource
|
||||
Hooks and resources are assigned to wave 0 by default. The wave can be negative, so you can create a wave that runs before all other resources.
|
||||
|
||||
When a sync operation takes place, Argo CD will:
|
||||
Order all resources according to their wave (lowest to highest)
|
||||
Apply the resources according to the resulting sequence
|
||||
|
||||
1. Order all resources according to their wave (lowest to highest)
|
||||
2. Apply the resources according to the resulting sequence
|
||||
|
||||
There is currently a delay between each sync wave in order to give other controllers a chance to react to the spec change that was just applied. This also prevents Argo CD from assessing resource health too quickly (against the stale object), causing hooks to fire prematurely. The current delay between each sync wave is 2 seconds and can be configured via the environment variable ARGOCD_SYNC_WAVE_DELAY.
|
||||
|
||||
@@ -67,16 +69,16 @@ While you can use sync waves on their own, for maximum flexibility you can combi
|
||||
|
||||
When Argo CD starts a sync, it orders the resources in the following precedence:
|
||||
|
||||
The phase
|
||||
The wave they are in (lower values first)
|
||||
By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
|
||||
By name
|
||||
1. The phase
|
||||
2. The wave they are in (lower values first)
|
||||
3. By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
|
||||
4. By name
|
||||
|
||||
Once the order is defined:
|
||||
|
||||
First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
|
||||
It applies resources in that wave.
|
||||
It repeats this process until all phases and waves are in-sync and healthy.
|
||||
1. First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
|
||||
2. It applies resources in that wave.
|
||||
3. It repeats this process until all phases and waves are in-sync and healthy.
|
||||
|
||||
Because an application can have resources that are unhealthy in the first wave, it may be that the app can never get to healthy.
|
||||
|
||||
|
||||
26
go.mod
26
go.mod
@@ -1,6 +1,6 @@
|
||||
module github.com/argoproj/argo-cd/v3
|
||||
|
||||
go 1.25.0
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
code.gitea.io/sdk/gitea v0.22.0
|
||||
@@ -12,8 +12,8 @@ require (
|
||||
github.com/Masterminds/sprig/v3 v3.3.0
|
||||
github.com/TomOnTime/utfutil v1.0.0
|
||||
github.com/alicebob/miniredis/v2 v2.35.0
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251108235403-13d5172d3ee2
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630
|
||||
github.com/argoproj/pkg v0.13.6
|
||||
github.com/argoproj/pkg/v2 v2.0.1
|
||||
github.com/aws/aws-sdk-go v1.55.7
|
||||
@@ -29,7 +29,7 @@ require (
|
||||
github.com/dlclark/regexp2 v1.11.5
|
||||
github.com/dustin/go-humanize v1.0.1
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible
|
||||
github.com/expr-lang/expr v1.17.6
|
||||
github.com/expr-lang/expr v1.17.7
|
||||
github.com/felixge/httpsnoop v1.0.4
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/gfleury/go-bitbucket-v1 v0.0.0-20240917142304-df385efaac68
|
||||
@@ -92,11 +92,11 @@ require (
|
||||
go.opentelemetry.io/otel v1.38.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
|
||||
go.opentelemetry.io/otel/sdk v1.38.0
|
||||
golang.org/x/crypto v0.42.0
|
||||
golang.org/x/net v0.44.0
|
||||
golang.org/x/crypto v0.46.0
|
||||
golang.org/x/net v0.47.0
|
||||
golang.org/x/oauth2 v0.31.0
|
||||
golang.org/x/sync v0.17.0
|
||||
golang.org/x/term v0.35.0
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/term v0.38.0
|
||||
golang.org/x/time v0.13.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5
|
||||
google.golang.org/grpc v1.75.1
|
||||
@@ -267,10 +267,10 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
|
||||
gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
@@ -290,7 +290,7 @@ require (
|
||||
k8s.io/controller-manager v0.34.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kube-aggregator v0.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.2 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
|
||||
44
go.sum
44
go.sum
@@ -113,10 +113,10 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251108235403-13d5172d3ee2 h1:g9XclEd+1mYQtpLL3rMgYbMUdB3kEppw3+Jd1/H54VM=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251108235403-13d5172d3ee2/go.mod h1:2nqYZBhj8CfVZb3ATakZpi1KNb/yc7mpadIHslicTFI=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff h1:pGGAeHIktPuYCRl1Z540XdxPFnedqyUhJK4VgpyJZfY=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d h1:iUJYrbSvpV9n8vyl1sBt1GceM60HhHfnHxuzcm5apDg=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d/go.mod h1:PauXVUVcfiTgC+34lDdWzPS101g4NpsUtDAjFBnWf94=
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630 h1:naE5KNRTOALjF5nVIGUHrHU5xjlB8QJJiCu+aISIlSs=
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
|
||||
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
|
||||
github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8=
|
||||
github.com/argoproj/pkg/v2 v2.0.1 h1:O/gCETzB/3+/hyFL/7d/VM/6pSOIRWIiBOTb2xqAHvc=
|
||||
@@ -257,8 +257,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/expr-lang/expr v1.17.6 h1:1h6i8ONk9cexhDmowO/A64VPxHScu7qfSl2k8OlINec=
|
||||
github.com/expr-lang/expr v1.17.6/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
|
||||
github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
|
||||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
|
||||
@@ -974,8 +974,8 @@ golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -1018,8 +1018,8 @@ golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1085,8 +1085,8 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1111,8 +1111,8 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1195,8 +1195,8 @@ golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b/go.mod h1:4ZwOYna0/zsOKwuR5X/m0QFOJpSZvAxFfkQT+Erd9D4=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@@ -1223,8 +1223,8 @@ golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1249,8 +1249,8 @@ golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1434,8 +1434,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs=
|
||||
k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4=
|
||||
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
|
||||
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
|
||||
k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
|
||||
k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/argoproj/argo-cd/get-previous-release
|
||||
|
||||
go 1.23.5
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.9.0
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.7
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.7
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
12
manifests/core-install-with-hydrator.yaml
generated
12
manifests/core-install-with-hydrator.yaml
generated
@@ -24850,7 +24850,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -24985,7 +24985,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25113,7 +25113,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25410,7 +25410,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25462,7 +25462,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -25810,7 +25810,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
10
manifests/core-install.yaml
generated
10
manifests/core-install.yaml
generated
@@ -24818,7 +24818,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -24947,7 +24947,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25244,7 +25244,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25296,7 +25296,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -25644,7 +25644,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.7
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.1
|
||||
newTag: v3.2.7
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
18
manifests/ha/install-with-hydrator.yaml
generated
18
manifests/ha/install-with-hydrator.yaml
generated
@@ -26216,7 +26216,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -26351,7 +26351,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26502,7 +26502,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -26598,7 +26598,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -26722,7 +26722,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -27045,7 +27045,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -27097,7 +27097,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -27471,7 +27471,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -27855,7 +27855,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/install.yaml
generated
16
manifests/ha/install.yaml
generated
@@ -26186,7 +26186,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -26338,7 +26338,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -26434,7 +26434,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -26558,7 +26558,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -26881,7 +26881,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26933,7 +26933,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -27307,7 +27307,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -27691,7 +27691,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
@@ -1897,7 +1897,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2032,7 +2032,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2183,7 +2183,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2279,7 +2279,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2403,7 +2403,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2726,7 +2726,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2778,7 +2778,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -3152,7 +3152,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3536,7 +3536,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/namespace-install.yaml
generated
16
manifests/ha/namespace-install.yaml
generated
@@ -1867,7 +1867,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2019,7 +2019,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2115,7 +2115,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2239,7 +2239,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2562,7 +2562,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2614,7 +2614,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2988,7 +2988,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3372,7 +3372,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/install-with-hydrator.yaml
generated
18
manifests/install-with-hydrator.yaml
generated
@@ -25294,7 +25294,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -25429,7 +25429,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25580,7 +25580,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -25676,7 +25676,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -25778,7 +25778,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -26075,7 +26075,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26127,7 +26127,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -26499,7 +26499,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -26883,7 +26883,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/install.yaml
generated
16
manifests/install.yaml
generated
@@ -25262,7 +25262,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -25414,7 +25414,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -25510,7 +25510,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -25612,7 +25612,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25909,7 +25909,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25961,7 +25961,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -26333,7 +26333,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -26717,7 +26717,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/namespace-install-with-hydrator.yaml
generated
18
manifests/namespace-install-with-hydrator.yaml
generated
@@ -975,7 +975,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1110,7 +1110,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1261,7 +1261,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1357,7 +1357,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1459,7 +1459,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1756,7 +1756,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1808,7 +1808,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2180,7 +2180,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2564,7 +2564,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/namespace-install.yaml
generated
16
manifests/namespace-install.yaml
generated
@@ -943,7 +943,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1095,7 +1095,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1191,7 +1191,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1293,7 +1293,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1590,7 +1590,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1642,7 +1642,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2014,7 +2014,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2398,7 +2398,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.1
|
||||
image: quay.io/argoproj/argocd:v3.2.7
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -92,6 +92,7 @@ nav:
|
||||
- operator-manual/notifications/services/pushover.md
|
||||
- operator-manual/notifications/services/rocketchat.md
|
||||
- operator-manual/notifications/services/slack.md
|
||||
- operator-manual/notifications/services/teams-workflows.md
|
||||
- operator-manual/notifications/services/teams.md
|
||||
- operator-manual/notifications/services/telegram.md
|
||||
- operator-manual/notifications/services/webex.md
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
package apiclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func Test_parseHeaders(t *testing.T) {
|
||||
@@ -39,3 +49,234 @@ func Test_parseGRPCHeaders(t *testing.T) {
|
||||
assert.ErrorContains(t, err, "additional headers must be colon(:)-separated: foo")
|
||||
})
|
||||
}
|
||||
|
||||
func TestExecuteRequest_ClosesBodyOnHTTPError(t *testing.T) {
|
||||
bodyClosed := &atomic.Bool{}
|
||||
|
||||
// Create a test server that returns HTTP 500 error
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create client with custom httpClient that tracks body closure
|
||||
originalTransport := http.DefaultTransport
|
||||
customTransport := &testTransport{
|
||||
base: originalTransport,
|
||||
bodyClosed: bodyClosed,
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:], // Remove "http://"
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Execute request that should fail with HTTP 500
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
_, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
|
||||
|
||||
// Verify error was returned
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed with status code 500")
|
||||
|
||||
// Give a small delay to ensure Close() was called
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify body was closed to prevent connection leak
|
||||
assert.True(t, bodyClosed.Load(), "response body should be closed on HTTP error to prevent connection leak")
|
||||
}
|
||||
|
||||
func TestExecuteRequest_ClosesBodyOnGRPCError(t *testing.T) {
|
||||
bodyClosed := &atomic.Bool{}
|
||||
|
||||
// Create a test server that returns HTTP 200 but with gRPC error status
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.Header().Set("Grpc-Status", "3") // codes.InvalidArgument
|
||||
w.Header().Set("Grpc-Message", "invalid argument")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create client with custom httpClient that tracks body closure
|
||||
originalTransport := http.DefaultTransport
|
||||
customTransport := &testTransport{
|
||||
base: originalTransport,
|
||||
bodyClosed: bodyClosed,
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:], // Remove "http://"
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Execute request that should fail with gRPC error
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
_, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
|
||||
|
||||
// Verify gRPC error was returned
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid argument")
|
||||
|
||||
// Give a small delay to ensure Close() was called
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify body was closed to prevent connection leak
|
||||
assert.True(t, bodyClosed.Load(), "response body should be closed on gRPC error to prevent connection leak")
|
||||
}
|
||||
|
||||
func TestExecuteRequest_ConcurrentErrorRequests_NoConnectionLeak(t *testing.T) {
|
||||
// This test simulates the scenario from the test script:
|
||||
// Multiple concurrent requests that fail should all close their response bodies
|
||||
|
||||
var totalRequests atomic.Int32
|
||||
var closedBodies atomic.Int32
|
||||
|
||||
// Create a test server that always returns errors
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
totalRequests.Add(1)
|
||||
// Alternate between HTTP errors and gRPC errors
|
||||
if totalRequests.Load()%2 == 0 {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
w.Header().Set("Grpc-Status", strconv.Itoa(int(codes.PermissionDenied)))
|
||||
w.Header().Set("Grpc-Message", "permission denied")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create client with custom transport that tracks closures
|
||||
customTransport := &testTransport{
|
||||
base: http.DefaultTransport,
|
||||
bodyClosed: &atomic.Bool{},
|
||||
onClose: func() {
|
||||
closedBodies.Add(1)
|
||||
},
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:],
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Simulate concurrent requests like in the test script
|
||||
concurrency := 10
|
||||
iterations := 5
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for iter := 0; iter < iterations; iter++ {
|
||||
for i := 0; i < concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
_, err := c.executeRequest(ctx, "/application.ApplicationService/ManagedResources", []byte("test"), md)
|
||||
// We expect errors
|
||||
assert.Error(t, err)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Give time for all Close() calls to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify all response bodies were closed
|
||||
expectedTotal := int32(concurrency * iterations)
|
||||
assert.Equal(t, expectedTotal, totalRequests.Load(), "all requests should have been made")
|
||||
assert.Equal(t, expectedTotal, closedBodies.Load(), "all response bodies should be closed to prevent connection leaks")
|
||||
}
|
||||
|
||||
func TestExecuteRequest_SuccessDoesNotCloseBodyPrematurely(t *testing.T) {
|
||||
// Verify that successful requests do NOT close the body in executeRequest
|
||||
// (caller is responsible for closing in success case)
|
||||
|
||||
bodyClosed := &atomic.Bool{}
|
||||
|
||||
// Create a test server that returns success
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.Header().Set("Grpc-Status", "0") // codes.OK
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
customTransport := &testTransport{
|
||||
base: http.DefaultTransport,
|
||||
bodyClosed: bodyClosed,
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:],
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Execute successful request
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
resp, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
|
||||
|
||||
// Verify success
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Verify body was NOT closed by executeRequest (caller's responsibility)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
assert.False(t, bodyClosed.Load(), "response body should NOT be closed by executeRequest on success - caller is responsible")
|
||||
}
|
||||
|
||||
// testTransport wraps http.RoundTripper to track body closures
|
||||
type testTransport struct {
|
||||
base http.RoundTripper
|
||||
bodyClosed *atomic.Bool
|
||||
onClose func() // Optional callback for each close
|
||||
}
|
||||
|
||||
func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
resp, err := t.base.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wrap the response body to track Close() calls
|
||||
resp.Body = &closeTracker{
|
||||
ReadCloser: resp.Body,
|
||||
closed: t.bodyClosed,
|
||||
onClose: t.onClose,
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type closeTracker struct {
|
||||
io.ReadCloser
|
||||
closed *atomic.Bool
|
||||
onClose func()
|
||||
}
|
||||
|
||||
func (c *closeTracker) Close() error {
|
||||
c.closed.Store(true)
|
||||
if c.onClose != nil {
|
||||
c.onClose()
|
||||
}
|
||||
return c.ReadCloser.Close()
|
||||
}
|
||||
|
||||
@@ -86,6 +86,9 @@ func (c *client) executeRequest(ctx context.Context, fullMethodName string, msg
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.Body != nil {
|
||||
utilio.Close(resp.Body)
|
||||
}
|
||||
return nil, fmt.Errorf("%s %s failed with status code %d", req.Method, req.URL, resp.StatusCode)
|
||||
}
|
||||
var code codes.Code
|
||||
@@ -97,6 +100,9 @@ func (c *client) executeRequest(ctx context.Context, fullMethodName string, msg
|
||||
code = codes.Code(statusInt)
|
||||
}
|
||||
if code != codes.OK {
|
||||
if resp.Body != nil {
|
||||
utilio.Close(resp.Body)
|
||||
}
|
||||
return nil, status.Error(code, resp.Header.Get("Grpc-Message"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2407,7 +2407,7 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.
|
||||
}
|
||||
log.Warnf("failed to set operation for app %q due to update conflict. retrying again...", *termOpReq.Name)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
_, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
|
||||
a, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting application by name: %w", err)
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
k8sbatchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -4030,3 +4031,75 @@ func TestServerSideDiff(t *testing.T) {
|
||||
assert.Contains(t, err.Error(), "application")
|
||||
})
|
||||
}
|
||||
|
||||
// TestTerminateOperationWithConflicts tests that TerminateOperation properly handles
|
||||
// concurrent update conflicts by retrying with the fresh application object.
|
||||
//
|
||||
// This test reproduces a bug where the retry loop discards the fresh app object
|
||||
// fetched from Get(), causing all retries to fail with stale resource versions.
|
||||
func TestTerminateOperationWithConflicts(t *testing.T) {
|
||||
testApp := newTestApp()
|
||||
testApp.ResourceVersion = "1"
|
||||
testApp.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
}
|
||||
testApp.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: *testApp.Operation,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
|
||||
appServer := newTestAppServer(t, testApp)
|
||||
ctx := context.Background()
|
||||
|
||||
// Get the fake clientset from the deepCopy wrapper
|
||||
fakeAppCs := appServer.appclientset.(*deepCopyAppClientset).GetUnderlyingClientSet().(*apps.Clientset)
|
||||
|
||||
getCallCount := 0
|
||||
updateCallCount := 0
|
||||
|
||||
// Remove default reactors and add our custom ones
|
||||
fakeAppCs.ReactionChain = nil
|
||||
|
||||
// Mock Get to return original version first, then fresh version
|
||||
fakeAppCs.AddReactor("get", "applications", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
getCallCount++
|
||||
freshApp := testApp.DeepCopy()
|
||||
if getCallCount == 1 {
|
||||
// First Get (for initialization) returns original version
|
||||
freshApp.ResourceVersion = "1"
|
||||
} else {
|
||||
// Subsequent Gets (during retry) return fresh version
|
||||
freshApp.ResourceVersion = "2"
|
||||
}
|
||||
return true, freshApp, nil
|
||||
})
|
||||
|
||||
// Mock Update to return conflict on first call, success on second
|
||||
fakeAppCs.AddReactor("update", "applications", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
updateCallCount++
|
||||
updateAction := action.(kubetesting.UpdateAction)
|
||||
app := updateAction.GetObject().(*v1alpha1.Application)
|
||||
|
||||
// First call (with original resource version): return conflict
|
||||
if app.ResourceVersion == "1" {
|
||||
return true, nil, apierrors.NewConflict(
|
||||
schema.GroupResource{Group: "argoproj.io", Resource: "applications"},
|
||||
app.Name,
|
||||
stderrors.New("the object has been modified"),
|
||||
)
|
||||
}
|
||||
|
||||
// Second call (with refreshed resource version from Get): return success
|
||||
updatedApp := app.DeepCopy()
|
||||
return true, updatedApp, nil
|
||||
})
|
||||
|
||||
// Attempt to terminate the operation
|
||||
_, err := appServer.TerminateOperation(ctx, &application.OperationTerminateRequest{
|
||||
Name: ptr.To(testApp.Name),
|
||||
})
|
||||
|
||||
// Should succeed after retrying with the fresh app
|
||||
require.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, updateCallCount, 2, "Update should be called at least twice (once with conflict, once with success)")
|
||||
}
|
||||
|
||||
@@ -1220,9 +1220,12 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
|
||||
|
||||
terminalOpts := application.TerminalOptions{DisableAuth: server.DisableAuth, Enf: server.enf}
|
||||
|
||||
// SSO ClientApp
|
||||
server.ssoClientApp, _ = oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
|
||||
|
||||
terminal := application.NewHandler(server.appLister, server.Namespace, server.ApplicationNamespaces, server.db, appResourceTreeFn, server.settings.ExecShells, server.sessionMgr, &terminalOpts).
|
||||
WithFeatureFlagMiddleware(server.settingsMgr.GetSettings)
|
||||
th := util_session.WithAuthMiddleware(server.DisableAuth, server.sessionMgr, terminal)
|
||||
th := util_session.WithAuthMiddleware(server.DisableAuth, server.settings.IsSSOConfigured(), server.ssoClientApp, server.sessionMgr, terminal)
|
||||
mux.Handle("/terminal", th)
|
||||
|
||||
// Proxy extension is currently an alpha feature and is disabled
|
||||
@@ -1252,7 +1255,7 @@ func (server *ArgoCDServer) newHTTPServer(ctx context.Context, port int, grpcWeb
|
||||
swagger.ServeSwaggerUI(mux, assets.SwaggerJSON, "/swagger-ui", server.RootPath)
|
||||
healthz.ServeHealthCheck(mux, server.healthCheck)
|
||||
|
||||
// Dex reverse proxy and client app and OAuth2 login/callback
|
||||
// Dex reverse proxy and OAuth2 login/callback
|
||||
server.registerDexHandlers(mux)
|
||||
|
||||
// Webhook handler for git events (Note: cache timeouts are hardcoded because API server does not write to cache and not really using them)
|
||||
@@ -1304,7 +1307,7 @@ func enforceContentTypes(handler http.Handler, types []string) http.Handler {
|
||||
func registerExtensions(mux *http.ServeMux, a *ArgoCDServer, metricsReg HTTPMetricsRegistry) {
|
||||
a.log.Info("Registering extensions...")
|
||||
extHandler := http.HandlerFunc(a.extensionManager.CallExtension())
|
||||
authMiddleware := a.sessionMgr.AuthMiddlewareFunc(a.DisableAuth)
|
||||
authMiddleware := a.sessionMgr.AuthMiddlewareFunc(a.DisableAuth, a.settings.IsSSOConfigured(), a.ssoClientApp)
|
||||
// auth middleware ensures that requests to all extensions are authenticated first
|
||||
mux.Handle(extension.URLPrefix+"/", authMiddleware(extHandler))
|
||||
|
||||
@@ -1358,7 +1361,7 @@ func (server *ArgoCDServer) serveExtensions(extensionsSharedPath string, w http.
|
||||
}
|
||||
}
|
||||
|
||||
// registerDexHandlers will register dex HTTP handlers, creating the OAuth client app
|
||||
// registerDexHandlers will register dex HTTP handlers
|
||||
func (server *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
|
||||
if !server.settings.IsSSOConfigured() {
|
||||
return
|
||||
@@ -1366,7 +1369,6 @@ func (server *ArgoCDServer) registerDexHandlers(mux *http.ServeMux) {
|
||||
// Run dex OpenID Connect Identity Provider behind a reverse proxy (served at /api/dex)
|
||||
var err error
|
||||
mux.HandleFunc(common.DexAPIEndpoint+"/", dexutil.NewDexHTTPReverseProxy(server.DexServerAddr, server.BaseHRef, server.DexTLSConfig))
|
||||
server.ssoClientApp, err = oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
|
||||
errorsutil.CheckError(err)
|
||||
mux.HandleFunc(common.LoginEndpoint, server.ssoClientApp.HandleLogin)
|
||||
mux.HandleFunc(common.CallbackEndpoint, server.ssoClientApp.HandleCallback)
|
||||
@@ -1577,34 +1579,15 @@ func (server *ArgoCDServer) getClaims(ctx context.Context) (jwt.Claims, string,
|
||||
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
|
||||
}
|
||||
|
||||
// Some SSO implementations (Okta) require a call to
|
||||
// the OIDC user info path to get attributes like groups
|
||||
// we assume that everywhere in argocd jwt.MapClaims is used as type for interface jwt.Claims
|
||||
// otherwise this would cause a panic
|
||||
var groupClaims jwt.MapClaims
|
||||
if groupClaims, ok = claims.(jwt.MapClaims); !ok {
|
||||
if tmpClaims, ok := claims.(*jwt.MapClaims); ok {
|
||||
groupClaims = *tmpClaims
|
||||
}
|
||||
}
|
||||
iss := jwtutil.StringField(groupClaims, "iss")
|
||||
if iss != util_session.SessionManagerClaimsIssuer && server.settings.UserInfoGroupsEnabled() && server.settings.UserInfoPath() != "" {
|
||||
userInfo, unauthorized, err := server.ssoClientApp.GetUserInfo(groupClaims, server.settings.IssuerURL(), server.settings.UserInfoPath())
|
||||
if unauthorized {
|
||||
log.Errorf("error while quering userinfo endpoint: %v", err)
|
||||
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
finalClaims := claims
|
||||
if server.settings.IsSSOConfigured() {
|
||||
finalClaims, err = server.ssoClientApp.SetGroupsFromUserInfo(claims, util_session.SessionManagerClaimsIssuer)
|
||||
if err != nil {
|
||||
log.Errorf("error fetching user info endpoint: %v", err)
|
||||
return claims, "", status.Errorf(codes.Internal, "invalid userinfo response")
|
||||
return claims, "", status.Errorf(codes.Unauthenticated, "invalid session: %v", err)
|
||||
}
|
||||
if groupClaims["sub"] != userInfo["sub"] {
|
||||
return claims, "", status.Error(codes.Unknown, "subject of claims from user info endpoint didn't match subject of idToken, see https://openid.net/specs/openid-connect-core-1_0.html#UserInfo")
|
||||
}
|
||||
groupClaims["groups"] = userInfo["groups"]
|
||||
}
|
||||
|
||||
return groupClaims, newToken, nil
|
||||
return finalClaims, newToken, nil
|
||||
}
|
||||
|
||||
// getToken extracts the token from gRPC metadata or cookie headers
|
||||
|
||||
@@ -8,7 +8,8 @@ sonar.projectVersion=1.0
|
||||
# Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
|
||||
sonar.sources=.
|
||||
|
||||
sonar.exclusions=docs/**
|
||||
# Exclude docs and testdata with kustomization files (Sonar IaC parser fails on empty/edge-case YAML)
|
||||
sonar.exclusions=docs/**,**/testdata/**
|
||||
|
||||
# Encoding of the source code. Default is default system encoding
|
||||
sonar.sourceEncoding=UTF-8
|
||||
@@ -24,5 +25,5 @@ sonar.go.exclusions=**/vendor/**,**/*.pb.go,**/*_test.go,**/*.pb.gw.go,**/mocks/
|
||||
# Exclude following set of patterns from duplication detection
|
||||
sonar.cpd.exclusions=**/*.pb.go,**/*.g.cs,**/*.gw.go,**/mocks/*,docs/**
|
||||
|
||||
# Exclude test manifests from analysis
|
||||
sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**
|
||||
# Exclude test manifests from analysis (avoids Sonar IaC parser errors on empty/edge-case kustomization files)
|
||||
sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**,util/app/discovery/testdata/**,reposerver/repository/testdata/**
|
||||
|
||||
@@ -3078,6 +3078,12 @@ func TestDeletionConfirmation(t *testing.T) {
|
||||
Then().Expect(OperationPhaseIs(OperationRunning)).
|
||||
When().ConfirmDeletion().
|
||||
Then().Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
// Wait for controller caches to fully settle before deletion
|
||||
// This ensures both the informer and cluster watcher have the latest state
|
||||
When().Refresh(RefreshTypeNormal).
|
||||
Then().
|
||||
When().Delete(true).
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
|
||||
@@ -41,10 +41,10 @@ func TestSyncWithNoDestinationServiceAccountsInProject(t *testing.T) {
|
||||
Given(t).
|
||||
Path("guestbook").
|
||||
When().
|
||||
WithImpersonationEnabled("", nil).
|
||||
CreateFromFile(func(app *v1alpha1.Application) {
|
||||
app.Spec.SyncPolicy = &v1alpha1.SyncPolicy{Automated: &v1alpha1.SyncPolicyAutomated{}}
|
||||
}).
|
||||
WithImpersonationEnabled("", nil).
|
||||
Then().
|
||||
// With the impersonation feature enabled, Application sync must fail
|
||||
// when there are no destination service accounts configured in AppProject
|
||||
|
||||
@@ -37,15 +37,17 @@ export const ApplicationHydrateOperationState: React.FunctionComponent<Props> =
|
||||
if (hydrateOperationState.finishedAt && hydrateOperationState.phase !== 'Hydrating') {
|
||||
operationAttributes.push({title: 'FINISHED AT', value: <Timestamp date={hydrateOperationState.finishedAt} />});
|
||||
}
|
||||
operationAttributes.push({
|
||||
title: 'DRY REVISION',
|
||||
value: (
|
||||
<div>
|
||||
<Revision repoUrl={hydrateOperationState.sourceHydrator.drySource.repoURL} revision={hydrateOperationState.drySHA} />
|
||||
</div>
|
||||
)
|
||||
});
|
||||
if (hydrateOperationState.finishedAt) {
|
||||
if (hydrateOperationState.drySHA) {
|
||||
operationAttributes.push({
|
||||
title: 'DRY REVISION',
|
||||
value: (
|
||||
<div>
|
||||
<Revision repoUrl={hydrateOperationState.sourceHydrator.drySource.repoURL} revision={hydrateOperationState.drySHA} />
|
||||
</div>
|
||||
)
|
||||
});
|
||||
}
|
||||
if (hydrateOperationState.finishedAt && hydrateOperationState.hydratedSHA) {
|
||||
operationAttributes.push({
|
||||
title: 'HYDRATED REVISION',
|
||||
value: (
|
||||
|
||||
@@ -272,13 +272,15 @@ export const ApplicationNodeInfo = (props: {
|
||||
Resource not found in cluster:{' '}
|
||||
{`${props?.controlled?.state?.targetState?.apiVersion}/${props?.controlled?.state?.targetState?.kind}:${props.node.name}`}
|
||||
<br />
|
||||
{props?.controlled?.state?.normalizedLiveState?.apiVersion && (
|
||||
<span>
|
||||
Please update your resource specification to use the latest Kubernetes API resources supported by the target cluster. The
|
||||
recommended syntax is{' '}
|
||||
{`${props.controlled.state.normalizedLiveState.apiVersion}/${props?.controlled.state.normalizedLiveState?.kind}:${props.node.name}`}
|
||||
</span>
|
||||
)}
|
||||
{props?.controlled?.state?.normalizedLiveState?.apiVersion &&
|
||||
`${props?.controlled?.state?.targetState?.apiVersion}/${props?.controlled?.state?.targetState?.kind}:${props.node.name}` !==
|
||||
`${props.controlled.state.normalizedLiveState.apiVersion}/${props?.controlled.state.normalizedLiveState?.kind}:${props.node.name}` && (
|
||||
<span>
|
||||
Please update your resource specification to use the latest Kubernetes API resources supported by the target cluster. The
|
||||
recommended syntax is{' '}
|
||||
{`${props.controlled.state.normalizedLiveState.apiVersion}/${props?.controlled.state.normalizedLiveState?.kind}:${props.node.name}`}
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</React.Fragment>
|
||||
|
||||
@@ -511,7 +511,9 @@ export interface HydrateOperation {
|
||||
finishedAt?: models.Time;
|
||||
phase: HydrateOperationPhase;
|
||||
message: string;
|
||||
// drySHA is the sha of the DRY commit being hydrated. This will be empty if the operation is not successful.
|
||||
drySHA: string;
|
||||
// hydratedSHA is the sha of the hydrated commit. This will be empty if the operation is not successful.
|
||||
hydratedSHA: string;
|
||||
sourceHydrator: SourceHydrator;
|
||||
}
|
||||
|
||||
@@ -97,23 +97,41 @@ func CheckOutOfBoundsSymlinks(basePath string) error {
|
||||
})
|
||||
}
|
||||
|
||||
// GetAppRefreshPaths returns the list of paths that should trigger a refresh for an application
|
||||
func GetAppRefreshPaths(app *v1alpha1.Application) []string {
|
||||
// GetSourceRefreshPaths returns the list of paths that should trigger a refresh for an application.
|
||||
// The source parameter influences the returned refresh paths:
|
||||
// - if source hydrator configured AND source is syncSource: use sync source path (ignores annotation)
|
||||
// - if source hydrator configured AND source is drySource WITH annotation: use annotation paths with drySource base
|
||||
// - if source hydrator not configured: use annotation paths with source base, or empty if no annotation
|
||||
func GetSourceRefreshPaths(app *v1alpha1.Application, source v1alpha1.ApplicationSource) []string {
|
||||
annotationPaths, hasAnnotation := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
|
||||
|
||||
if app.Spec.SourceHydrator != nil {
|
||||
syncSource := app.Spec.SourceHydrator.GetSyncSource()
|
||||
|
||||
// if source is syncSource use the source path
|
||||
if (source).Equals(&syncSource) {
|
||||
return []string{source.Path}
|
||||
}
|
||||
}
|
||||
|
||||
var paths []string
|
||||
if val, ok := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]; ok && val != "" {
|
||||
for _, item := range strings.Split(val, ";") {
|
||||
if hasAnnotation && annotationPaths != "" {
|
||||
for _, item := range strings.Split(annotationPaths, ";") {
|
||||
// skip empty paths
|
||||
if item == "" {
|
||||
continue
|
||||
}
|
||||
// if absolute path, add as is
|
||||
if filepath.IsAbs(item) {
|
||||
paths = append(paths, item[1:])
|
||||
} else {
|
||||
for _, source := range app.Spec.GetSources() {
|
||||
paths = append(paths, filepath.Clean(filepath.Join(source.Path, item)))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// add the path relative to the source path
|
||||
paths = append(paths, filepath.Clean(filepath.Join(source.Path, item)))
|
||||
}
|
||||
}
|
||||
|
||||
return paths
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
fileutil "github.com/argoproj/argo-cd/v3/test/fixture/path"
|
||||
@@ -100,96 +101,38 @@ func TestAbsSymlink(t *testing.T) {
|
||||
assert.Equal(t, "abslink", oobError.File)
|
||||
}
|
||||
|
||||
func getApp(annotation string, sourcePath string) *v1alpha1.Application {
|
||||
return &v1alpha1.Application{
|
||||
func getApp(annotation *string, sourcePath *string) *v1alpha1.Application {
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
v1alpha1.AnnotationKeyManifestGeneratePaths: annotation,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Path: sourcePath,
|
||||
},
|
||||
Name: "test-app",
|
||||
},
|
||||
}
|
||||
if annotation != nil {
|
||||
app.Annotations = make(map[string]string)
|
||||
app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths] = *annotation
|
||||
}
|
||||
|
||||
if sourcePath != nil {
|
||||
app.Spec.Source = &v1alpha1.ApplicationSource{
|
||||
Path: *sourcePath,
|
||||
}
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
func getMultiSourceApp(annotation string, paths ...string) *v1alpha1.Application {
|
||||
var sources v1alpha1.ApplicationSources
|
||||
for _, path := range paths {
|
||||
sources = append(sources, v1alpha1.ApplicationSource{Path: path})
|
||||
}
|
||||
return &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
v1alpha1.AnnotationKeyManifestGeneratePaths: annotation,
|
||||
},
|
||||
func getSourceHydratorApp(annotation *string, drySourcePath string, syncSourcePath string) *v1alpha1.Application {
|
||||
app := getApp(annotation, nil)
|
||||
app.Spec.SourceHydrator = &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
Path: drySourcePath,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: sources,
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
Path: syncSourcePath,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func Test_AppFilesHaveChanged(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *v1alpha1.Application
|
||||
files []string
|
||||
changeExpected bool
|
||||
}{
|
||||
{"default no path", &v1alpha1.Application{}, []string{"README.md"}, true},
|
||||
{"no files changed", getApp(".", "source/path"), []string{}, true},
|
||||
{"relative path - matching", getApp(".", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"relative path, multi source - matching #1", getMultiSourceApp(".", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"relative path, multi source - matching #2", getMultiSourceApp(".", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"relative path - not matching", getApp(".", "source/path"), []string{"README.md"}, false},
|
||||
{"relative path, multi source - not matching", getMultiSourceApp(".", "other/path", "unrelated/path"), []string{"README.md"}, false},
|
||||
{"absolute path - matching", getApp("/source/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"absolute path, multi source - matching #1", getMultiSourceApp("/source/path", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"absolute path, multi source - matching #2", getMultiSourceApp("/source/path", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"absolute path - not matching", getApp("/source/path1", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"absolute path, multi source - not matching", getMultiSourceApp("/source/path1", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"glob path * - matching", getApp("/source/**/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"glob path * - not matching", getApp("/source/**/my-service.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"glob path ? - matching", getApp("/source/path/my-deployment-?.yaml", "source/path"), []string{"source/path/my-deployment-0.yaml"}, true},
|
||||
{"glob path ? - not matching", getApp("/source/path/my-deployment-?.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"glob path char range - matching", getApp("/source/path[0-9]/my-deployment.yaml", "source/path"), []string{"source/path1/my-deployment.yaml"}, true},
|
||||
{"glob path char range - not matching", getApp("/source/path[0-9]/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"mixed glob path - matching", getApp("/source/path[0-9]/my-*.yaml", "source/path"), []string{"source/path1/my-deployment.yaml"}, true},
|
||||
{"mixed glob path - not matching", getApp("/source/path[0-9]/my-*.yaml", "source/path"), []string{"README.md"}, false},
|
||||
{"two relative paths - matching", getApp(".;../shared", "my-app"), []string{"shared/my-deployment.yaml"}, true},
|
||||
{"two relative paths, multi source - matching #1", getMultiSourceApp(".;../shared", "my-app", "other/path"), []string{"shared/my-deployment.yaml"}, true},
|
||||
{"two relative paths, multi source - matching #2", getMultiSourceApp(".;../shared", "my-app", "other/path"), []string{"shared/my-deployment.yaml"}, true},
|
||||
{"two relative paths - not matching", getApp(".;../shared", "my-app"), []string{"README.md"}, false},
|
||||
{"two relative paths, multi source - not matching", getMultiSourceApp(".;../shared", "my-app", "other/path"), []string{"README.md"}, false},
|
||||
{"file relative path - matching", getApp("./my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"file relative path, multi source - matching #1", getMultiSourceApp("./my-deployment.yaml", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"file relative path, multi source - matching #2", getMultiSourceApp("./my-deployment.yaml", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"file relative path - not matching", getApp("./my-deployment.yaml", "source/path"), []string{"README.md"}, false},
|
||||
{"file relative path, multi source - not matching", getMultiSourceApp("./my-deployment.yaml", "source/path", "other/path"), []string{"README.md"}, false},
|
||||
{"file absolute path - matching", getApp("/source/path/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"file absolute path, multi source - matching #1", getMultiSourceApp("/source/path/my-deployment.yaml", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"file absolute path, multi source - matching #2", getMultiSourceApp("/source/path/my-deployment.yaml", "other/path", "source/path"), []string{"source/path/my-deployment.yaml"}, true},
|
||||
{"file absolute path - not matching", getApp("/source/path1/README.md", "source/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"file absolute path, multi source - not matching", getMultiSourceApp("/source/path1/README.md", "source/path", "other/path"), []string{"source/path/my-deployment.yaml"}, false},
|
||||
{"file two relative paths - matching", getApp("./README.md;../shared/my-deployment.yaml", "my-app"), []string{"shared/my-deployment.yaml"}, true},
|
||||
{"file two relative paths, multi source - matching", getMultiSourceApp("./README.md;../shared/my-deployment.yaml", "my-app", "other-path"), []string{"shared/my-deployment.yaml"}, true},
|
||||
{"file two relative paths - not matching", getApp(".README.md;../shared/my-deployment.yaml", "my-app"), []string{"kustomization.yaml"}, false},
|
||||
{"file two relative paths, multi source - not matching", getMultiSourceApp(".README.md;../shared/my-deployment.yaml", "my-app", "other-path"), []string{"kustomization.yaml"}, false},
|
||||
{"changed file absolute path - matching", getApp(".", "source/path"), []string{"/source/path/my-deployment.yaml"}, true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
ttc := tt
|
||||
t.Run(ttc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
refreshPaths := GetAppRefreshPaths(ttc.app)
|
||||
assert.Equal(t, ttc.changeExpected, AppFilesHaveChanged(refreshPaths, ttc.files), "AppFilesHaveChanged()")
|
||||
})
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
func Test_GetAppRefreshPaths(t *testing.T) {
|
||||
@@ -198,23 +141,64 @@ func Test_GetAppRefreshPaths(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
app *v1alpha1.Application
|
||||
source v1alpha1.ApplicationSource
|
||||
expectedPaths []string
|
||||
}{
|
||||
{"default no path", &v1alpha1.Application{}, []string{}},
|
||||
{"relative path", getApp(".", "source/path"), []string{"source/path"}},
|
||||
{"absolute path - multi source", getMultiSourceApp("/source/path", "source/path", "other/path"), []string{"source/path"}},
|
||||
{"two relative paths ", getApp(".;../shared", "my-app"), []string{"my-app", "shared"}},
|
||||
{"file relative path", getApp("./my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}},
|
||||
{"file absolute path", getApp("/source/path/my-deployment.yaml", "source/path"), []string{"source/path/my-deployment.yaml"}},
|
||||
{"file two relative paths", getApp("./README.md;../shared/my-deployment.yaml", "my-app"), []string{"my-app/README.md", "shared/my-deployment.yaml"}},
|
||||
{"glob path", getApp("/source/*/my-deployment.yaml", "source/path"), []string{"source/*/my-deployment.yaml"}},
|
||||
{"empty path", getApp(".;", "source/path"), []string{"source/path"}},
|
||||
{
|
||||
name: "single source without annotation",
|
||||
app: getApp(nil, ptr.To("source/path")),
|
||||
source: v1alpha1.ApplicationSource{Path: "source/path"},
|
||||
expectedPaths: []string{},
|
||||
},
|
||||
{
|
||||
name: "single source with annotation",
|
||||
app: getApp(ptr.To(".;dev/deploy;other/path"), ptr.To("source/path")),
|
||||
source: v1alpha1.ApplicationSource{Path: "source/path"},
|
||||
expectedPaths: []string{"source/path", "source/path/dev/deploy", "source/path/other/path"},
|
||||
},
|
||||
{
|
||||
name: "single source with empty annotation",
|
||||
app: getApp(ptr.To(".;;"), ptr.To("source/path")),
|
||||
source: v1alpha1.ApplicationSource{Path: "source/path"},
|
||||
expectedPaths: []string{"source/path"},
|
||||
},
|
||||
{
|
||||
name: "single source with absolute path annotation",
|
||||
app: getApp(ptr.To("/fullpath/deploy;other/path"), ptr.To("source/path")),
|
||||
source: v1alpha1.ApplicationSource{Path: "source/path"},
|
||||
expectedPaths: []string{"fullpath/deploy", "source/path/other/path"},
|
||||
},
|
||||
{
|
||||
name: "source hydrator sync source without annotation",
|
||||
app: getSourceHydratorApp(nil, "dry/path", "sync/path"),
|
||||
source: v1alpha1.ApplicationSource{Path: "sync/path"},
|
||||
expectedPaths: []string{"sync/path"},
|
||||
},
|
||||
{
|
||||
name: "source hydrator dry source without annotation",
|
||||
app: getSourceHydratorApp(nil, "dry/path", "sync/path"),
|
||||
source: v1alpha1.ApplicationSource{Path: "dry/path"},
|
||||
expectedPaths: []string{},
|
||||
},
|
||||
{
|
||||
name: "source hydrator sync source with annotation",
|
||||
app: getSourceHydratorApp(ptr.To("deploy"), "dry/path", "sync/path"),
|
||||
source: v1alpha1.ApplicationSource{Path: "sync/path"},
|
||||
expectedPaths: []string{"sync/path"},
|
||||
},
|
||||
{
|
||||
name: "source hydrator dry source with annotation",
|
||||
app: getSourceHydratorApp(ptr.To("deploy"), "dry/path", "sync/path"),
|
||||
source: v1alpha1.ApplicationSource{Path: "dry/path"},
|
||||
expectedPaths: []string{"dry/path/deploy"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
ttc := tt
|
||||
t.Run(ttc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.ElementsMatch(t, ttc.expectedPaths, GetAppRefreshPaths(ttc.app), "GetAppRefreshPath()")
|
||||
assert.ElementsMatch(t, ttc.expectedPaths, GetSourceRefreshPaths(ttc.app, ttc.source), "GetAppRefreshPath()")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,9 +3,7 @@ package db
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@@ -141,22 +139,6 @@ func NewDB(namespace string, settingsMgr *settings.SettingsManager, kubeclientse
|
||||
}
|
||||
}
|
||||
|
||||
func (db *db) getSecret(name string, cache map[string]*corev1.Secret) (*corev1.Secret, error) {
|
||||
if _, ok := cache[name]; !ok {
|
||||
secret, err := db.settingsMgr.GetSecretByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cache[name] = secret
|
||||
}
|
||||
return cache[name], nil
|
||||
}
|
||||
|
||||
// StripCRLFCharacter strips the trailing CRLF characters
|
||||
func StripCRLFCharacter(input string) string {
|
||||
return strings.TrimSpace(input)
|
||||
}
|
||||
|
||||
// GetApplicationControllerReplicas gets the replicas of application controller
|
||||
func (db *db) GetApplicationControllerReplicas() int {
|
||||
// get the replicas from application controller deployment, if the application controller deployment does not exist, check for environment variable
|
||||
|
||||
@@ -163,7 +163,7 @@ func TestCreateWriteRepoCredentials(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "https://github.com/argoproj/", creds.URL)
|
||||
|
||||
secret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), RepoURLToSecretName(credSecretPrefix, creds.URL, ""), metav1.GetOptions{})
|
||||
secret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), RepoURLToSecretName(credWriteSecretPrefix, creds.URL, ""), metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, common.AnnotationValueManagedByArgoCD, secret.Annotations[common.AnnotationKeyManagedBy])
|
||||
|
||||
@@ -19,8 +19,12 @@ import (
|
||||
const (
|
||||
// Prefix to use for naming repository secrets
|
||||
repoSecretPrefix = "repo"
|
||||
// Prefix to use for naming repository write secrets
|
||||
repoWriteSecretPrefix = "repo-write"
|
||||
// Prefix to use for naming credential template secrets
|
||||
credSecretPrefix = "creds"
|
||||
// Prefix to use for naming write credential template secrets
|
||||
credWriteSecretPrefix = "creds-write"
|
||||
// The name of the key storing the username in the secret
|
||||
username = "username"
|
||||
// The name of the key storing the password in the secret
|
||||
|
||||
@@ -26,7 +26,11 @@ type secretsRepositoryBackend struct {
|
||||
}
|
||||
|
||||
func (s *secretsRepositoryBackend) CreateRepository(ctx context.Context, repository *appsv1.Repository) (*appsv1.Repository, error) {
|
||||
secName := RepoURLToSecretName(repoSecretPrefix, repository.Repo, repository.Project)
|
||||
secretPrefix := repoSecretPrefix
|
||||
if s.writeCreds {
|
||||
secretPrefix = repoWriteSecretPrefix
|
||||
}
|
||||
secName := RepoURLToSecretName(secretPrefix, repository.Repo, repository.Project)
|
||||
|
||||
repositorySecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -60,12 +64,8 @@ func (s *secretsRepositoryBackend) CreateRepository(ctx context.Context, reposit
|
||||
// the label is found and false otherwise. Will return false if no secret is found with the given
|
||||
// name.
|
||||
func (s *secretsRepositoryBackend) hasRepoTypeLabel(secretName string) (bool, error) {
|
||||
noCache := make(map[string]*corev1.Secret)
|
||||
sec, err := s.db.getSecret(secretName, noCache)
|
||||
sec, err := s.db.settingsMgr.GetSecretByName(secretName)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
_, ok := sec.GetLabels()[common.LabelKeySecretType]
|
||||
@@ -76,7 +76,7 @@ func (s *secretsRepositoryBackend) hasRepoTypeLabel(secretName string) (bool, er
|
||||
}
|
||||
|
||||
func (s *secretsRepositoryBackend) GetRepoCredsBySecretName(_ context.Context, name string) (*appsv1.RepoCreds, error) {
|
||||
secret, err := s.db.getSecret(name, map[string]*corev1.Secret{})
|
||||
secret, err := s.db.settingsMgr.GetSecretByName(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret %s: %w", name, err)
|
||||
}
|
||||
@@ -179,7 +179,11 @@ func (s *secretsRepositoryBackend) RepositoryExists(_ context.Context, repoURL,
|
||||
}
|
||||
|
||||
func (s *secretsRepositoryBackend) CreateRepoCreds(ctx context.Context, repoCreds *appsv1.RepoCreds) (*appsv1.RepoCreds, error) {
|
||||
secName := RepoURLToSecretName(credSecretPrefix, repoCreds.URL, "")
|
||||
secretPrefix := credSecretPrefix
|
||||
if s.writeCreds {
|
||||
secretPrefix = credWriteSecretPrefix
|
||||
}
|
||||
secName := RepoURLToSecretName(secretPrefix, repoCreds.URL, "")
|
||||
|
||||
repoCredsSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -400,6 +404,8 @@ func secretToRepository(secret *corev1.Secret) (*appsv1.Repository, error) {
|
||||
return repository, nil
|
||||
}
|
||||
|
||||
// repositoryToSecret updates the given secret with the data from the repository object. It adds the appropriate
|
||||
// labels/annotations, but it does not add any name or namespace metadata.
|
||||
func (s *secretsRepositoryBackend) repositoryToSecret(repository *appsv1.Repository, secret *corev1.Secret) *corev1.Secret {
|
||||
secretCopy := secret.DeepCopy()
|
||||
|
||||
|
||||
@@ -11,13 +11,9 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appsv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
@@ -39,8 +35,8 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
InsecureIgnoreHostKey: false,
|
||||
EnableLFS: true,
|
||||
}
|
||||
setupWithK8sObjects := func(objects ...runtime.Object) *fixture {
|
||||
clientset := getClientset(objects...)
|
||||
setup := func() *fixture {
|
||||
clientset := getClientset()
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
|
||||
repoBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
@@ -55,7 +51,7 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
t.Run("will create repository successfully", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
f := setupWithK8sObjects()
|
||||
f := setup()
|
||||
|
||||
// when
|
||||
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
@@ -85,21 +81,30 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
t.Run("will return proper error if secret does not have expected label", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
secret := &corev1.Secret{}
|
||||
s := secretsRepositoryBackend{}
|
||||
updatedSecret := s.repositoryToSecret(repo, secret)
|
||||
delete(updatedSecret.Labels, common.LabelKeySecretType)
|
||||
f := setupWithK8sObjects(updatedSecret)
|
||||
f.clientSet.ReactionChain = nil
|
||||
f.clientSet.AddReactor("create", "secrets", func(_ k8stesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
gr := schema.GroupResource{
|
||||
Group: "v1",
|
||||
Resource: "secrets",
|
||||
}
|
||||
return true, nil, apierrors.NewAlreadyExists(gr, "already exists")
|
||||
})
|
||||
f := setup()
|
||||
|
||||
// when
|
||||
_, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
|
||||
// given - remove the label from the secret
|
||||
secret, err := f.clientSet.CoreV1().Secrets(testNamespace).Get(
|
||||
t.Context(),
|
||||
RepoURLToSecretName(repoSecretPrefix, repo.Repo, ""),
|
||||
metav1.GetOptions{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
delete(secret.Labels, common.LabelKeySecretType)
|
||||
_, err = f.clientSet.CoreV1().Secrets(testNamespace).Update(t.Context(), secret, metav1.UpdateOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Resync informers to ensure the cache reflects the updated secret
|
||||
err = f.repoBackend.db.settingsMgr.ResyncInformers()
|
||||
require.NoError(t, err)
|
||||
|
||||
// when - try to create the same repository again
|
||||
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
@@ -107,41 +112,20 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
assert.Nil(t, output)
|
||||
status, ok := status.FromError(err)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, codes.InvalidArgument, status.Code())
|
||||
assert.Equal(t, codes.InvalidArgument, status.Code(), "got unexpected error: %v", err)
|
||||
})
|
||||
t.Run("will return proper error if secret already exists", func(t *testing.T) {
|
||||
t.Run("will return proper error if secret already exists and does have the proper label", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
secName := RepoURLToSecretName(repoSecretPrefix, repo.Repo, "")
|
||||
secret := &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secName,
|
||||
Namespace: "default",
|
||||
},
|
||||
}
|
||||
s := secretsRepositoryBackend{}
|
||||
updatedSecret := s.repositoryToSecret(repo, secret)
|
||||
f := setupWithK8sObjects(updatedSecret)
|
||||
f.clientSet.ReactionChain = nil
|
||||
f.clientSet.WatchReactionChain = nil
|
||||
f.clientSet.AddReactor("create", "secrets", func(_ k8stesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
gr := schema.GroupResource{
|
||||
Group: "v1",
|
||||
Resource: "secrets",
|
||||
}
|
||||
return true, nil, apierrors.NewAlreadyExists(gr, "already exists")
|
||||
})
|
||||
watcher := watch.NewFakeWithChanSize(1, true)
|
||||
watcher.Add(updatedSecret)
|
||||
f.clientSet.AddWatchReactor("secrets", func(_ k8stesting.Action) (handled bool, ret watch.Interface, err error) {
|
||||
return true, watcher, nil
|
||||
})
|
||||
f := setup()
|
||||
|
||||
// when
|
||||
_, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
|
||||
// when - try to create the same repository again
|
||||
output, err := f.repoBackend.CreateRepository(t.Context(), repo)
|
||||
|
||||
// then
|
||||
@@ -149,7 +133,7 @@ func TestSecretsRepositoryBackend_CreateRepository(t *testing.T) {
|
||||
assert.Nil(t, output)
|
||||
status, ok := status.FromError(err)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, codes.AlreadyExists, status.Code())
|
||||
assert.Equal(t, codes.AlreadyExists, status.Code(), "got unexpected error: %v", err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1180,3 +1164,86 @@ func TestRaceConditionInRepositoryOperations(t *testing.T) {
|
||||
assert.Equal(t, repo.Username, finalRepo.Username)
|
||||
assert.Equal(t, repo.Password, finalRepo.Password)
|
||||
}
|
||||
|
||||
func TestCreateReadAndWriteSecretForSameURL(t *testing.T) {
|
||||
clientset := getClientset()
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
|
||||
|
||||
repo := &appsv1.Repository{
|
||||
Name: "TestRepo",
|
||||
Repo: "git@github.com:argoproj/argo-cd.git",
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
}
|
||||
|
||||
// Create read secret
|
||||
readBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: false}
|
||||
_, err := readBackend.CreateRepository(t.Context(), repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create write secret
|
||||
writeBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: true}
|
||||
_, err = writeBackend.CreateRepository(t.Context(), repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert both secrets exist
|
||||
readSecretName := RepoURLToSecretName(repoSecretPrefix, repo.Repo, repo.Project)
|
||||
writeSecretName := RepoURLToSecretName(repoWriteSecretPrefix, repo.Repo, repo.Project)
|
||||
|
||||
readSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), readSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepository, readSecret.Labels[common.LabelKeySecretType])
|
||||
|
||||
writeSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), writeSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepositoryWrite, writeSecret.Labels[common.LabelKeySecretType])
|
||||
}
|
||||
|
||||
func TestCreateReadAndWriteRepoCredsSecretForSameURL(t *testing.T) {
|
||||
clientset := getClientset()
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), clientset, testNamespace)
|
||||
|
||||
creds := &appsv1.RepoCreds{
|
||||
URL: "git@github.com:argoproj/argo-cd.git",
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
}
|
||||
|
||||
// Create read creds secret
|
||||
readBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: false}
|
||||
_, err := readBackend.CreateRepoCreds(t.Context(), creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create write creds secret
|
||||
writeBackend := &secretsRepositoryBackend{db: &db{
|
||||
ns: testNamespace,
|
||||
kubeclientset: clientset,
|
||||
settingsMgr: settingsMgr,
|
||||
}, writeCreds: true}
|
||||
_, err = writeBackend.CreateRepoCreds(t.Context(), creds)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert both secrets exist
|
||||
readSecretName := RepoURLToSecretName(credSecretPrefix, creds.URL, "")
|
||||
writeSecretName := RepoURLToSecretName(credWriteSecretPrefix, creds.URL, "")
|
||||
|
||||
readSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), readSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepoCreds, readSecret.Labels[common.LabelKeySecretType])
|
||||
|
||||
writeSecret, err := clientset.CoreV1().Secrets(testNamespace).Get(t.Context(), writeSecretName, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.LabelValueSecretTypeRepoCredsWrite, writeSecret.Labels[common.LabelKeySecretType])
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package healthz
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -11,9 +12,13 @@ import (
|
||||
// ServeHealthCheck relies on the provided function to return an error if unhealthy and nil otherwise.
|
||||
func ServeHealthCheck(mux *http.ServeMux, f func(r *http.Request) error) {
|
||||
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
startTs := time.Now()
|
||||
if err := f(r); err != nil {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
log.Errorln(w, err)
|
||||
log.WithFields(log.Fields{
|
||||
"duration": time.Since(startTs),
|
||||
"component": "healthcheck",
|
||||
}).WithError(err).Error("Error serving health check request")
|
||||
} else {
|
||||
fmt.Fprintln(w, "ok")
|
||||
}
|
||||
|
||||
@@ -5,16 +5,22 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHealthCheck(t *testing.T) {
|
||||
sentinel := false
|
||||
|
||||
lc := &net.ListenConfig{}
|
||||
ctx := t.Context()
|
||||
svcErrMsg := "This is a dummy error"
|
||||
serve := func(c chan<- string) {
|
||||
// listen on first available dynamic (unprivileged) port
|
||||
listener, err := net.Listen("tcp", ":0")
|
||||
listener, err := lc.Listen(ctx, "tcp", ":0")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -25,7 +31,7 @@ func TestHealthCheck(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
ServeHealthCheck(mux, func(_ *http.Request) error {
|
||||
if sentinel {
|
||||
return errors.New("This is a dummy error")
|
||||
return errors.New(svcErrMsg)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -47,7 +53,23 @@ func TestHealthCheck(t *testing.T) {
|
||||
require.Equalf(t, http.StatusOK, resp.StatusCode, "Was expecting status code 200 from health check, but got %d instead", resp.StatusCode)
|
||||
|
||||
sentinel = true
|
||||
hook := test.NewGlobal()
|
||||
|
||||
resp, _ = http.Get(server + "/healthz")
|
||||
require.Equalf(t, http.StatusServiceUnavailable, resp.StatusCode, "Was expecting status code 503 from health check, but got %d instead", resp.StatusCode)
|
||||
assert.NotEmpty(t, hook.Entries, "Was expecting at least one log entry from health check, but got none")
|
||||
expectedMsg := "Error serving health check request"
|
||||
var foundEntry log.Entry
|
||||
for _, entry := range hook.Entries {
|
||||
if entry.Level == log.ErrorLevel &&
|
||||
entry.Message == expectedMsg {
|
||||
foundEntry = entry
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NotEmpty(t, foundEntry, "Expected an error message '%s', but it was't found", expectedMsg)
|
||||
actualErr, ok := foundEntry.Data["error"].(error)
|
||||
require.True(t, ok, "Expected 'error' field to contain an error, but it doesn't")
|
||||
assert.Equal(t, svcErrMsg, actualErr.Error(), "expected original error message '"+svcErrMsg+"', but got '"+actualErr.Error()+"'")
|
||||
assert.Greater(t, foundEntry.Data["duration"].(time.Duration), time.Duration(0))
|
||||
}
|
||||
|
||||
@@ -430,7 +430,7 @@ func isContentLayer(mediaType string) bool {
|
||||
|
||||
func isCompressedLayer(mediaType string) bool {
|
||||
// TODO: Is zstd something which is used in the wild? For now let's stick to these suffixes
|
||||
return strings.HasSuffix(mediaType, "tar+gzip") || strings.HasSuffix(mediaType, "tar")
|
||||
return strings.HasSuffix(mediaType, "tar+gzip") || strings.HasSuffix(mediaType, "tar.gzip") || strings.HasSuffix(mediaType, "tar")
|
||||
}
|
||||
|
||||
func createTarFile(from, to string) error {
|
||||
@@ -531,7 +531,7 @@ func (s *compressedLayerExtracterStore) Push(ctx context.Context, desc imagev1.D
|
||||
}
|
||||
defer os.RemoveAll(srcDir)
|
||||
|
||||
if strings.HasSuffix(desc.MediaType, "tar+gzip") {
|
||||
if strings.HasSuffix(desc.MediaType, "tar+gzip") || strings.HasSuffix(desc.MediaType, "tar.gzip") {
|
||||
err = files.Untgz(srcDir, content, s.maxSize, false)
|
||||
} else {
|
||||
err = files.Untar(srcDir, content, s.maxSize, false)
|
||||
|
||||
@@ -254,6 +254,31 @@ func Test_nativeOCIClient_Extract(t *testing.T) {
|
||||
disableManifestMaxExtractedSize: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "extraction with docker rootfs tar.gzip layer",
|
||||
fields: fields{
|
||||
allowedMediaTypes: []string{"application/vnd.docker.image.rootfs.diff.tar.gzip"},
|
||||
},
|
||||
args: args{
|
||||
digestFunc: func(store *memory.Store) string {
|
||||
layerBlob := createGzippedTarWithContent(t, "foo.yaml", "some content")
|
||||
return generateManifest(t, store, layerConf{content.NewDescriptorFromBytes("application/vnd.docker.image.rootfs.diff.tar.gzip", layerBlob), layerBlob})
|
||||
},
|
||||
postValidationFunc: func(_, path string, _ Client, _ fields, _ args) {
|
||||
manifestDir, err := os.ReadDir(path)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, manifestDir, 1)
|
||||
require.Equal(t, "foo.yaml", manifestDir[0].Name())
|
||||
f, err := os.Open(filepath.Join(path, manifestDir[0].Name()))
|
||||
require.NoError(t, err)
|
||||
contents, err := io.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "some content", string(contents))
|
||||
},
|
||||
manifestMaxExtractedSize: 1000,
|
||||
disableManifestMaxExtractedSize: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "extraction with standard gzip layer using cache",
|
||||
fields: fields{
|
||||
|
||||
@@ -493,7 +493,7 @@ func (a *ClientApp) HandleCallback(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
sub := jwtutil.StringField(claims, "sub")
|
||||
err = a.clientCache.Set(&cache.Item{
|
||||
Key: formatAccessTokenCacheKey(sub),
|
||||
Key: FormatAccessTokenCacheKey(sub),
|
||||
Object: encToken,
|
||||
CacheActionOpts: cache.CacheActionOpts{
|
||||
Expiration: getTokenExpiration(claims),
|
||||
@@ -640,6 +640,39 @@ func createClaimsAuthenticationRequestParameter(requestedClaims map[string]*oidc
|
||||
return oauth2.SetAuthURLParam("claims", string(claimsRequestRAW)), nil
|
||||
}
|
||||
|
||||
// SetGroupsFromUserInfo takes a claims object and adds groups claim from userinfo endpoint if available
|
||||
// This is required by some SSO implementations as they don't provide the groups claim in the ID token
|
||||
// If querying the UserInfo endpoint fails, we return an error to indicate the session is invalid
|
||||
// we assume that everywhere in argocd jwt.MapClaims is used as type for interface jwt.Claims
|
||||
// otherwise this would cause a panic
|
||||
func (a *ClientApp) SetGroupsFromUserInfo(claims jwt.Claims, sessionManagerClaimsIssuer string) (jwt.MapClaims, error) {
|
||||
var groupClaims jwt.MapClaims
|
||||
var ok bool
|
||||
if groupClaims, ok = claims.(jwt.MapClaims); !ok {
|
||||
if tmpClaims, ok := claims.(*jwt.MapClaims); ok {
|
||||
if tmpClaims != nil {
|
||||
groupClaims = *tmpClaims
|
||||
}
|
||||
}
|
||||
}
|
||||
iss := jwtutil.StringField(groupClaims, "iss")
|
||||
if iss != sessionManagerClaimsIssuer && a.settings.UserInfoGroupsEnabled() && a.settings.UserInfoPath() != "" {
|
||||
userInfo, unauthorized, err := a.GetUserInfo(groupClaims, a.settings.IssuerURL(), a.settings.UserInfoPath())
|
||||
if unauthorized {
|
||||
return groupClaims, fmt.Errorf("error while quering userinfo endpoint: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
return groupClaims, fmt.Errorf("error fetching user info endpoint: %w", err)
|
||||
}
|
||||
if groupClaims["sub"] != userInfo["sub"] {
|
||||
return groupClaims, errors.New("subject of claims from user info endpoint didn't match subject of idToken, see https://openid.net/specs/openid-connect-core-1_0.html#UserInfo")
|
||||
}
|
||||
groupClaims["groups"] = userInfo["groups"]
|
||||
}
|
||||
|
||||
return groupClaims, nil
|
||||
}
|
||||
|
||||
// GetUserInfo queries the IDP userinfo endpoint for claims
|
||||
func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoPath string) (jwt.MapClaims, bool, error) {
|
||||
sub := jwtutil.StringField(actualClaims, "sub")
|
||||
@@ -647,7 +680,7 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
|
||||
var encClaims []byte
|
||||
|
||||
// in case we got it in the cache, we just return the item
|
||||
clientCacheKey := formatUserInfoResponseCacheKey(sub)
|
||||
clientCacheKey := FormatUserInfoResponseCacheKey(sub)
|
||||
if err := a.clientCache.Get(clientCacheKey, &encClaims); err == nil {
|
||||
claimsRaw, err := crypto.Decrypt(encClaims, a.encryptionKey)
|
||||
if err != nil {
|
||||
@@ -664,7 +697,7 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
|
||||
|
||||
// check if the accessToken for the user is still present
|
||||
var encAccessToken []byte
|
||||
err := a.clientCache.Get(formatAccessTokenCacheKey(sub), &encAccessToken)
|
||||
err := a.clientCache.Get(FormatAccessTokenCacheKey(sub), &encAccessToken)
|
||||
// without an accessToken we can't query the user info endpoint
|
||||
// thus the user needs to reauthenticate for argocd to get a new accessToken
|
||||
if errors.Is(err, cache.ErrCacheMiss) {
|
||||
@@ -774,11 +807,11 @@ func getTokenExpiration(claims jwt.MapClaims) time.Duration {
|
||||
}
|
||||
|
||||
// formatUserInfoResponseCacheKey returns the key which is used to store userinfo of user in cache
|
||||
func formatUserInfoResponseCacheKey(sub string) string {
|
||||
func FormatUserInfoResponseCacheKey(sub string) string {
|
||||
return fmt.Sprintf("%s_%s", UserInfoResponseCachePrefix, sub)
|
||||
}
|
||||
|
||||
// formatAccessTokenCacheKey returns the key which is used to store the accessToken of a user in cache
|
||||
func formatAccessTokenCacheKey(sub string) string {
|
||||
func FormatAccessTokenCacheKey(sub string) string {
|
||||
return fmt.Sprintf("%s_%s", AccessTokenCachePrefix, sub)
|
||||
}
|
||||
|
||||
@@ -943,7 +943,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -958,7 +958,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -977,7 +977,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -992,7 +992,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -1011,7 +1011,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -1034,7 +1034,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -1053,7 +1053,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
expectError: true,
|
||||
},
|
||||
},
|
||||
@@ -1086,7 +1086,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
key: formatUserInfoResponseCacheKey("randomUser"),
|
||||
key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
value: "{\"groups\":[\"githubOrg:engineers\"]}",
|
||||
expectEncrypted: true,
|
||||
expectError: false,
|
||||
@@ -1113,7 +1113,7 @@ func TestGetUserInfo(t *testing.T) {
|
||||
encrypt bool
|
||||
}{
|
||||
{
|
||||
key: formatAccessTokenCacheKey("randomUser"),
|
||||
key: FormatAccessTokenCacheKey("randomUser"),
|
||||
value: "FakeAccessToken",
|
||||
encrypt: true,
|
||||
},
|
||||
@@ -1172,3 +1172,94 @@ func TestGetUserInfo(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetGroupsFromUserInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputClaims jwt.MapClaims // function input
|
||||
cacheClaims jwt.MapClaims // userinfo response
|
||||
expectedClaims jwt.MapClaims // function output
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "set correct groups from userinfo endpoint", // enriches the JWT claims with information from the userinfo endpoint, default case
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"githubOrg:example"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []any{"githubOrg:example"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())}, // the groups must be of type any since the response we get was parsed by GetUserInfo and we don't yet know the type of the groups claim
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "return error for wrong userinfo claims returned", // when there's an error in this feature, the claims should be untouched for the rest to still proceed
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: jwt.MapClaims{"sub": "wrongUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "override groups already defined in input claims", // this is expected behavior since input claims might have been truncated (HTTP header 4K limit)
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"superusers", "usergroup", "support-group"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []any{"superusers", "usergroup", "support-group"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "empty cache and non-rechable userinfo endpoint", // this will try to reach the userinfo endpoint defined in the test and fail
|
||||
inputClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
cacheClaims: nil, // the test doesn't set the cache for an empty object
|
||||
expectedClaims: jwt.MapClaims{"sub": "randomUser", "groups": []string{"groupfromjwt"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// create the ClientApp
|
||||
userInfoCache := cache.NewInMemoryCache(24 * time.Hour)
|
||||
signature, err := util.MakeSignature(32)
|
||||
require.NoError(t, err, "failed creating signature for settings object")
|
||||
cdSettings := &settings.ArgoCDSettings{
|
||||
ServerSignature: signature,
|
||||
OIDCConfigRAW: `
|
||||
issuer: http://localhost:63231
|
||||
enableUserInfoGroups: true
|
||||
userInfoPath: /`,
|
||||
}
|
||||
a, err := NewClientApp(cdSettings, "", nil, "/argo-cd", userInfoCache)
|
||||
require.NoError(t, err, "failed creating clientapp")
|
||||
|
||||
// prepoluate cache to predict what the GetUserInfo function will return to the SetGroupsFromUserInfo function (without having to mock the userinfo response)
|
||||
encryptionKey, err := cdSettings.GetServerEncryptionKey()
|
||||
require.NoError(t, err, "failed obtaining encryption key from settings")
|
||||
|
||||
// set fake accessToken for function to not return early
|
||||
encAccessToken, err := crypto.Encrypt([]byte("123456"), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting dummy access token")
|
||||
err = a.clientCache.Set(&cache.Item{
|
||||
Key: FormatAccessTokenCacheKey("randomUser"),
|
||||
Object: encAccessToken,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
|
||||
// set cacheClaims to in-memory cache to let GetUserInfo return early with this information (GetUserInfo has a separate test, here we focus on SetUserInfoGroups)
|
||||
if tt.cacheClaims != nil {
|
||||
cacheClaims, err := json.Marshal(tt.cacheClaims)
|
||||
require.NoError(t, err)
|
||||
encCacheClaims, err := crypto.Encrypt([]byte(cacheClaims), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting dummy access token")
|
||||
err = a.clientCache.Set(&cache.Item{
|
||||
Key: FormatUserInfoResponseCacheKey("randomUser"),
|
||||
Object: encCacheClaims,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
}
|
||||
|
||||
receivedClaims, err := a.SetGroupsFromUserInfo(tt.inputClaims, "argocd")
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, tt.expectedClaims, receivedClaims) // check that the claims were successfully enriched with what we expect
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -480,9 +480,9 @@ func (mgr *SessionManager) VerifyUsernamePassword(username string, password stri
|
||||
|
||||
// AuthMiddlewareFunc returns a function that can be used as an
|
||||
// authentication middleware for HTTP requests.
|
||||
func (mgr *SessionManager) AuthMiddlewareFunc(disabled bool) func(http.Handler) http.Handler {
|
||||
func (mgr *SessionManager) AuthMiddlewareFunc(disabled bool, isSSOConfigured bool, ssoClientApp *oidcutil.ClientApp) func(http.Handler) http.Handler {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return WithAuthMiddleware(disabled, mgr, h)
|
||||
return WithAuthMiddleware(disabled, isSSOConfigured, ssoClientApp, mgr, h)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -495,26 +495,41 @@ type TokenVerifier interface {
|
||||
// WithAuthMiddleware is an HTTP middleware used to ensure incoming
|
||||
// requests are authenticated before invoking the target handler. If
|
||||
// disabled is true, it will just invoke the next handler in the chain.
|
||||
func WithAuthMiddleware(disabled bool, authn TokenVerifier, next http.Handler) http.Handler {
|
||||
func WithAuthMiddleware(disabled bool, isSSOConfigured bool, ssoClientApp *oidcutil.ClientApp, authn TokenVerifier, next http.Handler) http.Handler {
|
||||
if disabled {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !disabled {
|
||||
cookies := r.Cookies()
|
||||
tokenString, err := httputil.JoinCookies(common.AuthCookieName, cookies)
|
||||
if err != nil {
|
||||
http.Error(w, "Auth cookie not found", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
claims, _, err := authn.VerifyToken(tokenString)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
// Add claims to the context to inspect for RBAC
|
||||
//nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, "claims", claims)
|
||||
r = r.WithContext(ctx)
|
||||
cookies := r.Cookies()
|
||||
tokenString, err := httputil.JoinCookies(common.AuthCookieName, cookies)
|
||||
if err != nil {
|
||||
http.Error(w, "Auth cookie not found", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
claims, _, err := authn.VerifyToken(tokenString)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
finalClaims := claims
|
||||
if isSSOConfigured {
|
||||
finalClaims, err = ssoClientApp.SetGroupsFromUserInfo(claims, SessionManagerClaimsIssuer)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid session", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
// Add claims to the context to inspect for RBAC
|
||||
//nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, "claims", finalClaims)
|
||||
r = r.WithContext(ctx)
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package session
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
@@ -29,7 +30,11 @@ import (
|
||||
apps "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned/fake"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/test"
|
||||
"github.com/argoproj/argo-cd/v3/util"
|
||||
"github.com/argoproj/argo-cd/v3/util/cache"
|
||||
"github.com/argoproj/argo-cd/v3/util/crypto"
|
||||
jwtutil "github.com/argoproj/argo-cd/v3/util/jwt"
|
||||
"github.com/argoproj/argo-cd/v3/util/oidc"
|
||||
"github.com/argoproj/argo-cd/v3/util/password"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
utiltest "github.com/argoproj/argo-cd/v3/util/test"
|
||||
@@ -236,20 +241,39 @@ func strPointer(str string) *string {
|
||||
|
||||
func TestSessionManager_WithAuthMiddleware(t *testing.T) {
|
||||
handlerFunc := func() func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, _ *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
t.Helper()
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Header().Set("Content-Type", "application/text")
|
||||
_, err := w.Write([]byte("Ok"))
|
||||
require.NoError(t, err, "error writing response: %s", err)
|
||||
|
||||
contextClaims := r.Context().Value("claims")
|
||||
if contextClaims != nil {
|
||||
var gotClaims jwt.MapClaims
|
||||
var ok bool
|
||||
if gotClaims, ok = contextClaims.(jwt.MapClaims); !ok {
|
||||
if tmpClaims, ok := contextClaims.(*jwt.MapClaims); ok && tmpClaims != nil {
|
||||
gotClaims = *tmpClaims
|
||||
}
|
||||
}
|
||||
jsonClaims, err := json.Marshal(gotClaims)
|
||||
require.NoError(t, err, "erorr marshalling claims set by AuthMiddleware")
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, err = w.Write(jsonClaims)
|
||||
require.NoError(t, err, "error writing response: %s", err)
|
||||
} else {
|
||||
w.Header().Set("Content-Type", "application/text")
|
||||
_, err := w.Write([]byte("Ok"))
|
||||
require.NoError(t, err, "error writing response: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
type testCase struct {
|
||||
name string
|
||||
authDisabled bool
|
||||
ssoEnabled bool
|
||||
cookieHeader bool
|
||||
verifiedClaims *jwt.RegisteredClaims
|
||||
verifiedClaims *jwt.MapClaims
|
||||
verifyTokenErr error
|
||||
userInfoCacheClaims *jwt.MapClaims
|
||||
expectedStatusCode int
|
||||
expectedResponseBody *string
|
||||
}
|
||||
@@ -258,47 +282,79 @@ func TestSessionManager_WithAuthMiddleware(t *testing.T) {
|
||||
{
|
||||
name: "will authenticate successfully",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: &jwt.RegisteredClaims{},
|
||||
verifiedClaims: &jwt.MapClaims{},
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("Ok"),
|
||||
expectedResponseBody: strPointer("{}"),
|
||||
},
|
||||
{
|
||||
name: "will be noop if auth is disabled",
|
||||
authDisabled: true,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: false,
|
||||
verifiedClaims: nil,
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("Ok"),
|
||||
},
|
||||
{
|
||||
name: "will return 400 if no cookie header",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: false,
|
||||
verifiedClaims: &jwt.RegisteredClaims{},
|
||||
verifiedClaims: &jwt.MapClaims{},
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusBadRequest,
|
||||
expectedResponseBody: nil,
|
||||
},
|
||||
{
|
||||
name: "will return 401 verify token fails",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: &jwt.RegisteredClaims{},
|
||||
verifiedClaims: &jwt.MapClaims{},
|
||||
verifyTokenErr: stderrors.New("token error"),
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusUnauthorized,
|
||||
expectedResponseBody: nil,
|
||||
},
|
||||
{
|
||||
name: "will return 200 if claims are nil",
|
||||
authDisabled: false,
|
||||
ssoEnabled: false,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: nil,
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil,
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("Ok"),
|
||||
expectedResponseBody: strPointer("null"),
|
||||
},
|
||||
{
|
||||
name: "will return 401 if sso is enabled but userinfo response not working",
|
||||
authDisabled: false,
|
||||
ssoEnabled: true,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: nil,
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: nil, // indicates that the userinfo response will not work since cache is empty and userinfo endpoint not rechable
|
||||
expectedStatusCode: http.StatusUnauthorized,
|
||||
expectedResponseBody: strPointer("Invalid session"),
|
||||
},
|
||||
{
|
||||
name: "will return 200 if sso is enabled and userinfo response from cache is valid",
|
||||
authDisabled: false,
|
||||
ssoEnabled: true,
|
||||
cookieHeader: true,
|
||||
verifiedClaims: &jwt.MapClaims{"sub": "randomUser", "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
verifyTokenErr: nil,
|
||||
userInfoCacheClaims: &jwt.MapClaims{"sub": "randomUser", "groups": []string{"superusers"}, "exp": float64(time.Now().Add(5 * time.Minute).Unix())},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedResponseBody: strPointer("\"groups\":[\"superusers\"]"),
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
@@ -311,7 +367,47 @@ func TestSessionManager_WithAuthMiddleware(t *testing.T) {
|
||||
claims: tc.verifiedClaims,
|
||||
err: tc.verifyTokenErr,
|
||||
}
|
||||
ts := httptest.NewServer(WithAuthMiddleware(tc.authDisabled, tm, mux))
|
||||
clientApp := &oidc.ClientApp{} // all testcases need at least the empty struct for the function to work
|
||||
if tc.ssoEnabled {
|
||||
userInfoCache := cache.NewInMemoryCache(24 * time.Hour)
|
||||
signature, err := util.MakeSignature(32)
|
||||
require.NoError(t, err, "failed creating signature for settings object")
|
||||
cdSettings := &settings.ArgoCDSettings{
|
||||
ServerSignature: signature,
|
||||
OIDCConfigRAW: `
|
||||
issuer: http://localhost:63231
|
||||
enableUserInfoGroups: true
|
||||
userInfoPath: /`,
|
||||
}
|
||||
clientApp, err = oidc.NewClientApp(cdSettings, "", nil, "/argo-cd", userInfoCache)
|
||||
require.NoError(t, err, "failed creating clientapp")
|
||||
|
||||
// prepopulate the cache with claims to return for a userinfo call
|
||||
encryptionKey, err := cdSettings.GetServerEncryptionKey()
|
||||
require.NoError(t, err, "failed obtaining encryption key from settings")
|
||||
// set fake accessToken for GetUserInfo to not return early (can be the same for all cases)
|
||||
encAccessToken, err := crypto.Encrypt([]byte("123456"), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting dummy access token")
|
||||
err = userInfoCache.Set(&cache.Item{
|
||||
Key: oidc.FormatAccessTokenCacheKey("randomUser"),
|
||||
Object: encAccessToken,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
|
||||
// set cacheClaims to in-memory cache to let GetUserInfo return early with this information
|
||||
if tc.userInfoCacheClaims != nil {
|
||||
cacheClaims, err := json.Marshal(tc.userInfoCacheClaims)
|
||||
require.NoError(t, err)
|
||||
encCacheClaims, err := crypto.Encrypt([]byte(cacheClaims), encryptionKey)
|
||||
require.NoError(t, err, "failed encrypting cache Claims")
|
||||
err = userInfoCache.Set(&cache.Item{
|
||||
Key: oidc.FormatUserInfoResponseCacheKey("randomUser"),
|
||||
Object: encCacheClaims,
|
||||
})
|
||||
require.NoError(t, err, "failed setting item to in-memory cache")
|
||||
}
|
||||
}
|
||||
ts := httptest.NewServer(WithAuthMiddleware(tc.authDisabled, tc.ssoEnabled, clientApp, tm, mux))
|
||||
defer ts.Close()
|
||||
req, err := http.NewRequest(http.MethodGet, ts.URL, http.NoBody)
|
||||
require.NoErrorf(t, err, "error creating request: %s", err)
|
||||
|
||||
@@ -1286,13 +1286,13 @@ func (mgr *SettingsManager) GetSettings() (*ArgoCDSettings, error) {
|
||||
|
||||
var settings ArgoCDSettings
|
||||
var errs []error
|
||||
updateSettingsFromConfigMap(&settings, argoCDCM)
|
||||
if err := mgr.updateSettingsFromSecret(&settings, argoCDSecret, secrets); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return &settings, errors.Join(errs...)
|
||||
}
|
||||
updateSettingsFromConfigMap(&settings, argoCDCM)
|
||||
|
||||
return &settings, nil
|
||||
}
|
||||
|
||||
@@ -330,23 +330,23 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload any) {
|
||||
appIf := a.appsLister.Applications(nsFilter)
|
||||
apps, err := appIf.List(labels.Everything())
|
||||
if err != nil {
|
||||
log.Warnf("Failed to list applications: %v", err)
|
||||
log.Errorf("Failed to list applications: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
installationID, err := a.settingsSrc.GetInstallationID()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to get installation ID: %v", err)
|
||||
log.Errorf("Failed to get installation ID: %v", err)
|
||||
return
|
||||
}
|
||||
trackingMethod, err := a.settingsSrc.GetTrackingMethod()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to get trackingMethod: %v", err)
|
||||
log.Errorf("Failed to get trackingMethod: %v", err)
|
||||
return
|
||||
}
|
||||
appInstanceLabelKey, err := a.settingsSrc.GetAppInstanceLabelKey()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to get appInstanceLabelKey: %v", err)
|
||||
log.Errorf("Failed to get appInstanceLabelKey: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -362,41 +362,47 @@ func (a *ArgoCDWebhookHandler) HandleEvent(payload any) {
|
||||
for _, webURL := range webURLs {
|
||||
repoRegexp, err := GetWebURLRegex(webURL)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to get repoRegexp: %s", err)
|
||||
log.Errorf("Failed to get repoRegexp: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// iterate over apps and check if any files specified in their sources have changed
|
||||
for _, app := range filteredApps {
|
||||
// get all sources, including sync source and dry source if source hydrator is configured
|
||||
sources := app.Spec.GetSources()
|
||||
if app.Spec.SourceHydrator != nil {
|
||||
drySource := app.Spec.SourceHydrator.GetDrySource()
|
||||
if sourceRevisionHasChanged(drySource, revision, touchedHead) && sourceUsesURL(drySource, webURL, repoRegexp) {
|
||||
refreshPaths := path.GetAppRefreshPaths(&app)
|
||||
if path.AppFilesHaveChanged(refreshPaths, changedFiles) {
|
||||
namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
log.Infof("webhook trigger refresh app to hydrate '%s'", app.Name)
|
||||
_, err = argo.RefreshApp(namespacedAppInterface, app.Name, v1alpha1.RefreshTypeNormal, true)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to hydrate app '%s' for controller reprocessing: %v", app.Name, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
// we already have sync source, so add dry source if source hydrator is configured
|
||||
sources = append(sources, app.Spec.SourceHydrator.GetDrySource())
|
||||
}
|
||||
|
||||
for _, source := range app.Spec.GetSources() {
|
||||
// iterate over all sources and check if any files specified in refresh paths have changed
|
||||
for _, source := range sources {
|
||||
if sourceRevisionHasChanged(source, revision, touchedHead) && sourceUsesURL(source, webURL, repoRegexp) {
|
||||
refreshPaths := path.GetAppRefreshPaths(&app)
|
||||
refreshPaths := path.GetSourceRefreshPaths(&app, source)
|
||||
if path.AppFilesHaveChanged(refreshPaths, changedFiles) {
|
||||
namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
_, err = argo.RefreshApp(namespacedAppInterface, app.Name, v1alpha1.RefreshTypeNormal, true)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to refresh app '%s' for controller reprocessing: %v", app.Name, err)
|
||||
continue
|
||||
hydrate := false
|
||||
if app.Spec.SourceHydrator != nil {
|
||||
drySource := app.Spec.SourceHydrator.GetDrySource()
|
||||
if (&source).Equals(&drySource) {
|
||||
hydrate = true
|
||||
}
|
||||
}
|
||||
// No need to refresh multiple times if multiple sources match.
|
||||
break
|
||||
|
||||
// refresh paths have changed, so we need to refresh the app
|
||||
log.Infof("refreshing app '%s' from webhook", app.Name)
|
||||
if hydrate {
|
||||
// log if we need to hydrate the app
|
||||
log.Infof("webhook trigger refresh app to hydrate '%s'", app.Name)
|
||||
}
|
||||
namespacedAppInterface := a.appClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
if _, err := argo.RefreshApp(namespacedAppInterface, app.Name, v1alpha1.RefreshTypeNormal, hydrate); err != nil {
|
||||
log.Errorf("Failed to refresh app '%s': %v", app.Name, err)
|
||||
}
|
||||
break // we don't need to check other sources
|
||||
} else if change.shaBefore != "" && change.shaAfter != "" {
|
||||
if err := a.storePreviouslyCachedManifests(&app, change, trackingMethod, appInstanceLabelKey, installationID); err != nil {
|
||||
log.Warnf("Failed to store cached manifests of previous revision for app '%s': %v", app.Name, err)
|
||||
// update the cached manifests with the new revision cache key
|
||||
if err := a.storePreviouslyCachedManifests(&app, change, trackingMethod, appInstanceLabelKey, installationID, source); err != nil {
|
||||
log.Errorf("Failed to store cached manifests of previous revision for app '%s': %v", app.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -449,7 +455,7 @@ func getURLRegex(originalURL string, regexpFormat string) (*regexp.Regexp, error
|
||||
return repoRegexp, nil
|
||||
}
|
||||
|
||||
func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Application, change changeInfo, trackingMethod string, appInstanceLabelKey string, installationID string) error {
|
||||
func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Application, change changeInfo, trackingMethod string, appInstanceLabelKey string, installationID string, source v1alpha1.ApplicationSource) error {
|
||||
destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, a.db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error validating destination: %w", err)
|
||||
@@ -472,7 +478,7 @@ func (a *ArgoCDWebhookHandler) storePreviouslyCachedManifests(app *v1alpha1.Appl
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting ref sources: %w", err)
|
||||
}
|
||||
source := app.Spec.GetSource()
|
||||
|
||||
cache.LogDebugManifestCacheKeyFields("moving manifests cache", "webhook app revision changed", change.shaBefore, &source, refSources, &clusterInfo, app.Spec.Destination.Namespace, trackingMethod, appInstanceLabelKey, app.Name, nil)
|
||||
|
||||
if err := a.repoCache.SetNewRevisionManifests(change.shaAfter, change.shaBefore, &source, refSources, &clusterInfo, app.Spec.Destination.Namespace, trackingMethod, appInstanceLabelKey, app.Name, nil, installationID); err != nil {
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned/fake"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/cache"
|
||||
cacheutil "github.com/argoproj/argo-cd/v3/util/cache"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
@@ -182,64 +183,6 @@ func TestAzureDevOpsCommitEvent(t *testing.T) {
|
||||
hook.Reset()
|
||||
}
|
||||
|
||||
// TestGitHubCommitEvent_MultiSource_Refresh makes sure that a webhook will refresh a multi-source app when at least
|
||||
// one source matches.
|
||||
func TestGitHubCommitEvent_MultiSource_Refresh(t *testing.T) {
|
||||
hook := test.NewGlobal()
|
||||
var patched bool
|
||||
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patchAction := action.(kubetesting.PatchAction)
|
||||
assert.Equal(t, "app-to-refresh", patchAction.GetName())
|
||||
patched = true
|
||||
return true, nil, nil
|
||||
}
|
||||
h := NewMockHandler(&reactorDef{"patch", "applications", reaction}, []string{}, &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-to-refresh",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/some/unrelated-repo",
|
||||
Path: ".",
|
||||
},
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: ".",
|
||||
},
|
||||
},
|
||||
},
|
||||
}, &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-to-ignore",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/some/unrelated-repo",
|
||||
Path: ".",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/webhook", http.NoBody)
|
||||
req.Header.Set("X-GitHub-Event", "push")
|
||||
eventJSON, err := os.ReadFile("testdata/github-commit-event.json")
|
||||
require.NoError(t, err)
|
||||
req.Body = io.NopCloser(bytes.NewReader(eventJSON))
|
||||
w := httptest.NewRecorder()
|
||||
h.Handler(w, req)
|
||||
close(h.queue)
|
||||
h.Wait()
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
expectedLogResult := "Requested app 'app-to-refresh' refresh"
|
||||
assert.Equal(t, expectedLogResult, hook.LastEntry().Message)
|
||||
assert.True(t, patched)
|
||||
hook.Reset()
|
||||
}
|
||||
|
||||
// TestGitHubCommitEvent_AppsInOtherNamespaces makes sure that webhooks properly find apps in the configured set of
|
||||
// allowed namespaces when Apps are allowed in any namespace
|
||||
func TestGitHubCommitEvent_AppsInOtherNamespaces(t *testing.T) {
|
||||
@@ -338,72 +281,6 @@ func TestGitHubCommitEvent_AppsInOtherNamespaces(t *testing.T) {
|
||||
hook.Reset()
|
||||
}
|
||||
|
||||
// TestGitHubCommitEvent_Hydrate makes sure that a webhook will hydrate an app when dry source changed.
|
||||
func TestGitHubCommitEvent_Hydrate(t *testing.T) {
|
||||
hook := test.NewGlobal()
|
||||
var patched bool
|
||||
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patchAction := action.(kubetesting.PatchAction)
|
||||
assert.Equal(t, "app-to-hydrate", patchAction.GetName())
|
||||
patched = true
|
||||
return true, nil, nil
|
||||
}
|
||||
h := NewMockHandler(&reactorDef{"patch", "applications", reaction}, []string{}, &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-to-hydrate",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
TargetRevision: "HEAD",
|
||||
Path: ".",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "environments/dev",
|
||||
Path: ".",
|
||||
},
|
||||
HydrateTo: nil,
|
||||
},
|
||||
},
|
||||
}, &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-to-ignore",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/some/unrelated-repo",
|
||||
Path: ".",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/webhook", http.NoBody)
|
||||
req.Header.Set("X-GitHub-Event", "push")
|
||||
eventJSON, err := os.ReadFile("testdata/github-commit-event.json")
|
||||
require.NoError(t, err)
|
||||
req.Body = io.NopCloser(bytes.NewReader(eventJSON))
|
||||
w := httptest.NewRecorder()
|
||||
h.Handler(w, req)
|
||||
close(h.queue)
|
||||
h.Wait()
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
assert.True(t, patched)
|
||||
|
||||
logMessages := make([]string, 0, len(hook.Entries))
|
||||
for _, entry := range hook.Entries {
|
||||
logMessages = append(logMessages, entry.Message)
|
||||
}
|
||||
|
||||
assert.Contains(t, logMessages, "webhook trigger refresh app to hydrate 'app-to-hydrate'")
|
||||
assert.NotContains(t, logMessages, "webhook trigger refresh app to hydrate 'app-to-ignore'")
|
||||
|
||||
hook.Reset()
|
||||
}
|
||||
|
||||
func TestGitHubTagEvent(t *testing.T) {
|
||||
hook := test.NewGlobal()
|
||||
h := NewMockHandler(nil, []string{})
|
||||
@@ -646,7 +523,8 @@ func Test_affectedRevisionInfo_appRevisionHasChanged(t *testing.T) {
|
||||
// The payload's "push.changes[0].new.name" member seems to only have the branch name (based on the example payload).
|
||||
// https://support.atlassian.com/bitbucket-cloud/docs/event-payloads/#EventPayloads-Push
|
||||
var pl bitbucket.RepoPushPayload
|
||||
_ = json.Unmarshal([]byte(fmt.Sprintf(`{"push":{"changes":[{"new":{"name":%q}}]}}`, branchName)), &pl)
|
||||
err := json.Unmarshal([]byte(fmt.Sprintf(`{"push":{"changes":[{"new":{"name":%q}}]}}`, branchName)), &pl)
|
||||
require.NoError(t, err)
|
||||
return pl
|
||||
}
|
||||
|
||||
@@ -878,6 +756,463 @@ func TestGitHubCommitEventMaxPayloadSize(t *testing.T) {
|
||||
hook.Reset()
|
||||
}
|
||||
|
||||
func TestHandleEvent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
app *v1alpha1.Application
|
||||
changedFile string // file that was changed in the webhook payload
|
||||
hasRefresh bool // application has refresh annotation applied
|
||||
hasHydrate bool // application has hydrate annotation applied
|
||||
updateCache bool // cache should be updated with the new revision
|
||||
}{
|
||||
{
|
||||
name: "single source without annotation - always refreshes",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "source/path",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "source/path/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "single source with annotation - matching file triggers refresh",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "deploy",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "source/path",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "source/path/deploy/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "single source with annotation - non-matching file updates cache",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "manifests",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "source/path",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "source/path/other/app.yaml",
|
||||
hasRefresh: false,
|
||||
hasHydrate: false,
|
||||
updateCache: true,
|
||||
},
|
||||
{
|
||||
name: "single source with multiple paths annotation - matching subpath triggers refresh",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "manifests;dev/deploy;other/path",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "source/path",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "source/path/dev/deploy/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "multi-source without annotation - always refreshes",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "helm-charts",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "ksapps",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "ksapps/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "multi-source with annotation - matching file triggers refresh",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "components",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "helm-charts",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
Path: "ksapps",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "ksapps/components/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "source hydrator sync source without annotation - refreshes when sync path matches",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "dry/path",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "master",
|
||||
Path: "sync/path",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "sync/path/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "source hydrator dry source without annotation - always refreshes and hydrates",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "dry/path",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "master",
|
||||
Path: "sync/path",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "other/path/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: true,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "source hydrator sync source with annotation - refresh only",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "deploy",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "dry/path",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "master",
|
||||
Path: "sync/path",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "sync/path/deploy/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: false,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "source hydrator dry source with annotation - refresh and hydrate",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "deploy",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "dry/path",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "master",
|
||||
Path: "sync/path",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "dry/path/deploy/app.yaml",
|
||||
hasRefresh: true,
|
||||
hasHydrate: true,
|
||||
updateCache: false,
|
||||
},
|
||||
{
|
||||
name: "source hydrator dry source with annotation - non-matching file updates cache",
|
||||
app: &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-app",
|
||||
Namespace: "argocd",
|
||||
Annotations: map[string]string{
|
||||
"argocd.argoproj.io/manifest-generate-paths": "deploy",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://github.com/jessesuen/test-repo",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "dry/path",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "master",
|
||||
Path: "sync/path",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
changedFile: "dry/path/other/app.yaml",
|
||||
hasRefresh: false,
|
||||
hasHydrate: false,
|
||||
updateCache: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
ttc := tt
|
||||
t.Run(ttc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var patchData []byte
|
||||
var patched bool
|
||||
reaction := func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetVerb() == "patch" {
|
||||
patchAction := action.(kubetesting.PatchAction)
|
||||
patchData = patchAction.GetPatch()
|
||||
patched = true
|
||||
}
|
||||
return true, nil, nil
|
||||
}
|
||||
|
||||
// Setup cache
|
||||
inMemoryCache := cacheutil.NewInMemoryCache(1 * time.Hour)
|
||||
cacheClient := cacheutil.NewCache(inMemoryCache)
|
||||
repoCache := cache.NewCache(
|
||||
cacheClient,
|
||||
1*time.Minute,
|
||||
1*time.Minute,
|
||||
10*time.Second,
|
||||
)
|
||||
|
||||
// Pre-populate cache with beforeSHA if we're testing cache updates
|
||||
if ttc.updateCache {
|
||||
var source *v1alpha1.ApplicationSource
|
||||
if ttc.app.Spec.SourceHydrator != nil {
|
||||
drySource := ttc.app.Spec.SourceHydrator.GetDrySource()
|
||||
source = &drySource
|
||||
} else if len(ttc.app.Spec.Sources) > 0 {
|
||||
source = &ttc.app.Spec.Sources[0]
|
||||
}
|
||||
if source != nil {
|
||||
setupTestCache(t, repoCache, ttc.app.Name, source, []string{"test-manifest"})
|
||||
}
|
||||
}
|
||||
|
||||
// Setup server cache with cluster info
|
||||
serverCache := servercache.NewCache(appstate.NewCache(cacheClient, time.Minute), time.Minute, time.Minute)
|
||||
mockDB := &mocks.ArgoDB{}
|
||||
|
||||
// Set destination if not present (required for cache updates)
|
||||
if ttc.app.Spec.Destination.Server == "" {
|
||||
ttc.app.Spec.Destination.Server = testClusterURL
|
||||
}
|
||||
|
||||
mockDB.EXPECT().GetCluster(mock.Anything, testClusterURL).Return(&v1alpha1.Cluster{
|
||||
Server: testClusterURL,
|
||||
Info: v1alpha1.ClusterInfo{
|
||||
ServerVersion: "1.28.0",
|
||||
ConnectionState: v1alpha1.ConnectionState{Status: v1alpha1.ConnectionStatusSuccessful},
|
||||
APIVersions: []string{},
|
||||
},
|
||||
}, nil).Maybe()
|
||||
|
||||
err := serverCache.SetClusterInfo(testClusterURL, &v1alpha1.ClusterInfo{
|
||||
ServerVersion: "1.28.0",
|
||||
ConnectionState: v1alpha1.ConnectionState{Status: v1alpha1.ConnectionStatusSuccessful},
|
||||
APIVersions: []string{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create handler with reaction
|
||||
appClientset := appclientset.NewSimpleClientset(ttc.app)
|
||||
defaultReactor := appClientset.ReactionChain[0]
|
||||
appClientset.ReactionChain = nil
|
||||
appClientset.AddReactor("list", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return defaultReactor.React(action)
|
||||
})
|
||||
appClientset.AddReactor("patch", "applications", reaction)
|
||||
|
||||
h := NewHandler(
|
||||
"argocd",
|
||||
[]string{},
|
||||
10,
|
||||
appClientset,
|
||||
&fakeAppsLister{clientset: appClientset},
|
||||
&settings.ArgoCDSettings{},
|
||||
&fakeSettingsSrc{},
|
||||
repoCache,
|
||||
serverCache,
|
||||
mockDB,
|
||||
int64(50)*1024*1024,
|
||||
)
|
||||
|
||||
// Create payload with the changed file
|
||||
payload := createTestPayload(ttc.changedFile)
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/webhook", http.NoBody)
|
||||
req.Header.Set("X-GitHub-Event", "push")
|
||||
req.Body = io.NopCloser(bytes.NewReader(payload))
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
h.Handler(w, req)
|
||||
close(h.queue)
|
||||
h.Wait()
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
// Verify refresh behavior
|
||||
assert.Equal(t, ttc.hasRefresh, patched, "patch status mismatch for test: %s", ttc.name)
|
||||
if patched && patchData != nil {
|
||||
verifyAnnotations(t, patchData, ttc.hasRefresh, ttc.hasHydrate)
|
||||
}
|
||||
|
||||
// Verify cache update behavior
|
||||
if ttc.updateCache {
|
||||
var source *v1alpha1.ApplicationSource
|
||||
if ttc.app.Spec.SourceHydrator != nil {
|
||||
drySource := ttc.app.Spec.SourceHydrator.GetDrySource()
|
||||
source = &drySource
|
||||
} else if len(ttc.app.Spec.Sources) > 0 {
|
||||
source = &ttc.app.Spec.Sources[0]
|
||||
}
|
||||
if source != nil {
|
||||
// Verify cache was updated with afterSHA
|
||||
clusterInfo := &mockClusterInfo{}
|
||||
var afterManifests cache.CachedManifestResponse
|
||||
err := repoCache.GetManifests(testAfterSHA, source, nil, clusterInfo, "", "", testAppLabelKey, ttc.app.Name, &afterManifests, nil, "")
|
||||
require.NoError(t, err, "cache should be updated with afterSHA")
|
||||
if err == nil {
|
||||
assert.Equal(t, testAfterSHA, afterManifests.ManifestResponse.Revision, "cached revision should match afterSHA")
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// createTestPayload creates a GitHub push event payload with the specified changed file
|
||||
func createTestPayload(changedFile string) []byte {
|
||||
payload := fmt.Sprintf(`{
|
||||
"ref": "refs/heads/master",
|
||||
"before": "%s",
|
||||
"after": "%s",
|
||||
"repository": {
|
||||
"html_url": "https://github.com/jessesuen/test-repo",
|
||||
"default_branch": "master"
|
||||
},
|
||||
"commits": [
|
||||
{
|
||||
"added": [],
|
||||
"modified": ["%s"],
|
||||
"removed": []
|
||||
}
|
||||
]
|
||||
}`, testBeforeSHA, testAfterSHA, changedFile)
|
||||
return []byte(payload)
|
||||
}
|
||||
|
||||
func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
|
||||
httpmock.Activate()
|
||||
defer httpmock.DeactivateAndReset()
|
||||
@@ -922,10 +1257,9 @@ func Test_affectedRevisionInfo_bitbucket_changed_files(t *testing.T) {
|
||||
"oldHash": "abcdef",
|
||||
"newHash": "ghijkl",
|
||||
})
|
||||
if err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
_ = json.Unmarshal(doc.Bytes(), &pl)
|
||||
require.NoError(t, err)
|
||||
err = json.Unmarshal(doc.Bytes(), &pl)
|
||||
require.NoError(t, err)
|
||||
return pl
|
||||
}
|
||||
|
||||
@@ -1238,3 +1572,72 @@ func getDiffstatResponderFn() func(req *http.Request) (*http.Response, error) {
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// mockClusterInfo is a stub implementation of cache.ClusterRuntimeInfo used by
// the webhook tests; it reports a fixed server version and no API versions.
type mockClusterInfo struct{}

// GetApiVersions returns an empty (non-nil) API-version list.
func (m *mockClusterInfo) GetApiVersions() []string { //nolint:revive // interface method name
	return []string{}
}

// GetKubeVersion returns the fixed Kubernetes server version used in tests.
func (m *mockClusterInfo) GetKubeVersion() string {
	return "1.28.0"
}
|
||||
|
||||
// Common test constants
const (
	// testBeforeSHA is the commit SHA used as the "before" revision in the
	// simulated push-event payloads and for pre-populating the repo cache.
	testBeforeSHA = "d5c1ffa8e294bc18c639bfb4e0df499251034414"
	// testAfterSHA is the commit SHA used as the "after" revision; tests
	// verify the cache ends up keyed by this revision after the webhook runs.
	testAfterSHA = "63738bb582c8b540af7bcfc18f87c575c3ed66e0"
	// testClusterURL is the destination cluster API server URL assigned to
	// test applications and registered with the mock DB / server cache.
	testClusterURL = "https://kubernetes.default.svc"
	// testAppLabelKey is the application instance label key passed to the
	// repo-cache Get/SetManifests helpers.
	testAppLabelKey = "mycompany.com/appname"
)
|
||||
|
||||
// verifyAnnotations is a helper that checks if the expected annotations are present in patch data
|
||||
func verifyAnnotations(t *testing.T, patchData []byte, expectRefresh bool, expectHydrate bool) {
|
||||
t.Helper()
|
||||
if patchData == nil {
|
||||
if expectRefresh {
|
||||
t.Error("expected app to be patched but patchData is nil")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var patchMap map[string]any
|
||||
err := json.Unmarshal(patchData, &patchMap)
|
||||
require.NoError(t, err)
|
||||
|
||||
metadata, hasMetadata := patchMap["metadata"].(map[string]any)
|
||||
require.True(t, hasMetadata, "patch should have metadata")
|
||||
|
||||
annotations, hasAnnotations := metadata["annotations"].(map[string]any)
|
||||
require.True(t, hasAnnotations, "patch should have annotations")
|
||||
|
||||
// Check refresh annotation
|
||||
refreshValue, hasRefresh := annotations["argocd.argoproj.io/refresh"]
|
||||
if expectRefresh {
|
||||
assert.True(t, hasRefresh, "should have refresh annotation")
|
||||
assert.Equal(t, "normal", refreshValue, "refresh annotation should be 'normal'")
|
||||
} else {
|
||||
assert.False(t, hasRefresh, "should not have refresh annotation")
|
||||
}
|
||||
|
||||
// Check hydrate annotation
|
||||
hydrateValue, hasHydrate := annotations["argocd.argoproj.io/hydrate"]
|
||||
if expectHydrate {
|
||||
assert.True(t, hasHydrate, "should have hydrate annotation")
|
||||
assert.Equal(t, "normal", hydrateValue, "hydrate annotation should be 'normal'")
|
||||
} else {
|
||||
assert.False(t, hasHydrate, "should not have hydrate annotation")
|
||||
}
|
||||
}
|
||||
|
||||
// setupTestCache is a helper that creates and populates a test cache
|
||||
func setupTestCache(t *testing.T, repoCache *cache.Cache, appName string, source *v1alpha1.ApplicationSource, manifests []string) {
|
||||
t.Helper()
|
||||
clusterInfo := &mockClusterInfo{}
|
||||
dummyManifests := &cache.CachedManifestResponse{
|
||||
ManifestResponse: &apiclient.ManifestResponse{
|
||||
Revision: testBeforeSHA,
|
||||
Manifests: manifests,
|
||||
Namespace: "",
|
||||
Server: testClusterURL,
|
||||
},
|
||||
}
|
||||
err := repoCache.SetManifests(testBeforeSHA, source, nil, clusterInfo, "", "", testAppLabelKey, appName, dummyManifests, nil, "")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user