mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-03-06 08:28:50 +01:00
Compare commits
70 Commits
dynamic-re
...
v2.9.2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c5ea5c4df5 | ||
|
|
c2c9746050 | ||
|
|
78cd50b2c7 | ||
|
|
dd86b08369 | ||
|
|
0ca43663c9 | ||
|
|
d4c37e2521 | ||
|
|
ba60fadd94 | ||
|
|
58b04e5e11 | ||
|
|
3acd5ee30d | ||
|
|
45d5de702e | ||
|
|
b8efc8b1ab | ||
|
|
9cf0c69bbe | ||
|
|
871b045368 | ||
|
|
7f9be43b8b | ||
|
|
78ad599120 | ||
|
|
557871d223 | ||
|
|
f29e4fbea3 | ||
|
|
766316ef74 | ||
|
|
eb0afcbc3d | ||
|
|
0083647b8b | ||
|
|
5289315c3f | ||
|
|
83ec3bfbf7 | ||
|
|
5c86f758c3 | ||
|
|
67e1e04afb | ||
|
|
266e92e3a1 | ||
|
|
6648d31671 | ||
|
|
82185106a2 | ||
|
|
b14837e58e | ||
|
|
9c4a90af91 | ||
|
|
3750adefa7 | ||
|
|
dc3d08e626 | ||
|
|
eaa9af21d7 | ||
|
|
55e5d6bf3e | ||
|
|
85422bbb17 | ||
|
|
79901a4e84 | ||
|
|
5506e8520c | ||
|
|
0a97e150d8 | ||
|
|
e1ac2f6071 | ||
|
|
8c3f38a97d | ||
|
|
8b9c448786 | ||
|
|
80baeb8a6c | ||
|
|
72f7b14594 | ||
|
|
b9bf46dfb9 | ||
|
|
d7c489b9cc | ||
|
|
38eb17a027 | ||
|
|
ea3402962f | ||
|
|
b3fabc23cd | ||
|
|
69d6d1064b | ||
|
|
d09621d36b | ||
|
|
728205618e | ||
|
|
912a2db05c | ||
|
|
d105196075 | ||
|
|
61dc8b5083 | ||
|
|
cbd88068b2 | ||
|
|
129cf5370f | ||
|
|
30767ae9b0 | ||
|
|
03c6e1a14e | ||
|
|
08e53e7274 | ||
|
|
1d274585bd | ||
|
|
62995f0675 | ||
|
|
17527044b8 | ||
|
|
cb25382658 | ||
|
|
045f5b1a21 | ||
|
|
ae47e05dd7 | ||
|
|
2b1b125fa6 | ||
|
|
ef88d1d026 | ||
|
|
98ee9443e3 | ||
|
|
491b3898ac | ||
|
|
45c0c2a422 | ||
|
|
1a684a7e1e |
6
.github/workflows/ci-build.yaml
vendored
6
.github/workflows/ci-build.yaml
vendored
@@ -265,9 +265,9 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
|
||||
- name: Setup NodeJS
|
||||
uses: actions/setup-node@bea5baf987ba7aa777a8a0b4ace377a21c45c381 # v3.8.0
|
||||
uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
|
||||
with:
|
||||
node-version: '20.4.0'
|
||||
node-version: '20.7.0'
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
|
||||
@@ -361,7 +361,7 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
strategy:
|
||||
matrix:
|
||||
k3s-version: [v1.27.2, v1.26.0, v1.25.4, v1.24.3]
|
||||
k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
|
||||
needs:
|
||||
- build-go
|
||||
env:
|
||||
|
||||
10
.github/workflows/image-reuse.yaml
vendored
10
.github/workflows/image-reuse.yaml
vendored
@@ -76,7 +76,7 @@ jobs:
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
|
||||
with:
|
||||
cosign-release: 'v2.0.0'
|
||||
cosign-release: 'v2.0.2'
|
||||
|
||||
- uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
|
||||
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
|
||||
@@ -135,6 +135,14 @@ jobs:
|
||||
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
|
||||
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV
|
||||
|
||||
- name: Free Disk Space (Ubuntu)
|
||||
uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
|
||||
with:
|
||||
large-packages: false
|
||||
docker-images: false
|
||||
swap-storage: false
|
||||
tool-cache: false
|
||||
|
||||
- name: Build and push container image
|
||||
id: image
|
||||
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1
|
||||
|
||||
6
.github/workflows/release.yaml
vendored
6
.github/workflows/release.yaml
vendored
@@ -38,7 +38,7 @@ jobs:
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
|
||||
with:
|
||||
image: quay.io/argoproj/argocd
|
||||
digest: ${{ needs.argocd-image.outputs.image-digest }}
|
||||
@@ -120,7 +120,7 @@ jobs:
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
|
||||
provenance-name: "argocd-cli.intoto.jsonl"
|
||||
@@ -204,7 +204,7 @@ jobs:
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
|
||||
with:
|
||||
base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
|
||||
provenance-name: "argocd-sbom.intoto.jsonl"
|
||||
|
||||
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fca
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.21.1@sha256:cffaba795c36f07e372c7191b35ceaae114d74c31c3763d442982e3a4df3b39e AS builder
|
||||
FROM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS builder
|
||||
|
||||
RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
|
||||
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.1@sha256:cffaba795c36f07e372c7191b35ceaae114d74c31c3763d442982e3a4df3b39e AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -160,13 +160,15 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
if r.EnableProgressiveSyncs {
|
||||
if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
|
||||
// If appset used progressive sync but stopped, clean up the progressive sync application statuses
|
||||
log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
|
||||
|
||||
err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
|
||||
}
|
||||
} else {
|
||||
} else if applicationSetInfo.Spec.Strategy != nil {
|
||||
// appset uses progressive sync
|
||||
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
@@ -436,8 +438,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
|
||||
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
_, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
@@ -451,15 +452,6 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
|
||||
continue
|
||||
}
|
||||
|
||||
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error validating permissions: %s", err)
|
||||
}
|
||||
if len(conditions) > 0 {
|
||||
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return errorsByIndex, nil
|
||||
@@ -631,7 +623,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
},
|
||||
}
|
||||
|
||||
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
|
||||
action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, found, func() error {
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
@@ -683,6 +675,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
|
||||
found.ObjectMeta.Finalizers = generatedApp.Finalizers
|
||||
found.ObjectMeta.Labels = generatedApp.Labels
|
||||
|
||||
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
|
||||
})
|
||||
|
||||
@@ -853,7 +846,11 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
if len(newFinalizers) != len(app.Finalizers) {
|
||||
updated := app.DeepCopy()
|
||||
updated.Finalizers = newFinalizers
|
||||
if err := r.Client.Patch(ctx, updated, client.MergeFrom(app)); err != nil {
|
||||
patch := client.MergeFrom(app)
|
||||
if log.IsLevelEnabled(log.DebugLevel) {
|
||||
utils.LogPatch(appLog, patch, updated)
|
||||
}
|
||||
if err := r.Client.Patch(ctx, updated, patch); err != nil {
|
||||
return fmt.Errorf("error updating finalizers: %w", err)
|
||||
}
|
||||
r.updateCache(ctx, updated, appLog)
|
||||
|
||||
@@ -979,6 +979,296 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
|
||||
name: "Ensure that ignored targetRevision difference doesn't cause an update, even if another field changes",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.source.targetRevision"}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
TargetRevision: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
TargetRevision: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
// The targetRevision is ignored, so this should not be updated.
|
||||
TargetRevision: "foo",
|
||||
// This should be updated.
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
// This is the existing value from the cluster, which should not be updated because the field is ignored.
|
||||
TargetRevision: "bar",
|
||||
// This was missing on the cluster, so it should be added.
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
|
||||
name: "ignore parameters added to a multi-source app in the cluster",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
// This should not be updated, because reconciliation shouldn't modify the App.
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
// This existed only in the cluster, but it shouldn't be removed, because the field is ignored.
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
name: "Demonstrate limitation of MergePatch", // Maybe we can fix this in Argo CD 3.0: https://github.com/argoproj/argo-cd/issues/15975
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "new: values",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "new: values",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "new: values",
|
||||
// The Parameters field got blown away, because the values field changed. MergePatch
|
||||
// doesn't merge list items, it replaces the whole list if an item changes.
|
||||
// If we eventually add a `name` field to Sources, we can use StrategicMergePatch.
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
|
||||
@@ -1012,7 +1302,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
}, got)
|
||||
|
||||
err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, obj, *got)
|
||||
}
|
||||
})
|
||||
@@ -1951,7 +2240,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
@@ -1959,9 +2248,8 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://good-cluster"}}},
|
||||
project := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},
|
||||
}
|
||||
appSet := v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -1974,22 +2262,22 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "good-cluster","url": "https://good-cluster"}`),
|
||||
Raw: []byte(`{"project": "good-project"}`),
|
||||
}, {
|
||||
Raw: []byte(`{"cluster": "bad-cluster","url": "https://bad-cluster"}`),
|
||||
Raw: []byte(`{"project": "bad-project"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "{{.cluster}}",
|
||||
Name: "{{.project}}",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "{{.url}}"},
|
||||
Project: "{{.project}}",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "https://kubernetes.default.svc"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1997,17 +2285,9 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset()
|
||||
argoDBMock := dbmocks.ArgoDB{}
|
||||
argoObjs := []runtime.Object{&defaultProject}
|
||||
argoObjs := []runtime.Object{&project}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
|
||||
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
|
||||
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
|
||||
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
|
||||
argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
|
||||
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
|
||||
goodCluster,
|
||||
}}, nil)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -2039,12 +2319,12 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
var app v1alpha1.Application
|
||||
|
||||
// make sure good app got created
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-cluster"}, &app)
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-project"}, &app)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, app.Name, "good-cluster")
|
||||
assert.Equal(t, app.Name, "good-project")
|
||||
|
||||
// make sure bad app was not created
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-cluster"}, &app)
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-project"}, &app)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
kubetesting "k8s.io/client-go/testing"
|
||||
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/test/e2e/fixture/applicationsets/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -69,7 +68,7 @@ func createClusterSecret(secretName string, clusterName string, clusterServer st
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
Namespace: utils.ArgoCDNamespace,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{
|
||||
ArgoCDSecretTypeLabel: ArgoCDSecretTypeCluster,
|
||||
},
|
||||
@@ -111,7 +110,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
objects = append(objects, secret)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
appCond := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
appCond := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Nil(t, appCond)
|
||||
assert.Equal(t, "https://127.0.0.1:6443", dest.Server)
|
||||
assert.True(t, dest.IsServerInferred())
|
||||
@@ -124,7 +123,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
Namespace: "default",
|
||||
}
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, nil, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, nil, fakeNamespace)
|
||||
assert.Equal(t, "application destination can't have both name and server defined: minikube https://127.0.0.1:6443", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
@@ -139,7 +138,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
return true, nil, fmt.Errorf("an error occurred")
|
||||
})
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Equal(t, "unable to find destination server: an error occurred", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
@@ -154,7 +153,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
objects = append(objects, secret)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Equal(t, "unable to find destination server: there are no clusters with this name: minikube", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
@@ -171,7 +170,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
objects = append(objects, secret, secret2)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Equal(t, "unable to find destination server: there are 2 clusters with the same name: [https://127.0.0.1:2443 https://127.0.0.1:8443]", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
|
||||
@@ -2,18 +2,24 @@ package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
)
|
||||
|
||||
// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
|
||||
@@ -29,7 +35,7 @@ import (
|
||||
// The MutateFn is called regardless of creating or updating an object.
|
||||
//
|
||||
// It returns the executed operation and an error.
|
||||
func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
|
||||
func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
|
||||
|
||||
key := client.ObjectKeyFromObject(obj)
|
||||
if err := c.Get(ctx, key, obj); err != nil {
|
||||
@@ -45,15 +51,24 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
|
||||
return controllerutil.OperationResultCreated, nil
|
||||
}
|
||||
|
||||
existingObj := obj.DeepCopyObject()
|
||||
existing, ok := existingObj.(client.Object)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("existing object is not a client.Object"))
|
||||
}
|
||||
normalizedLive := obj.DeepCopy()
|
||||
|
||||
// Mutate the live object to match the desired state.
|
||||
if err := mutate(f, key, obj); err != nil {
|
||||
return controllerutil.OperationResultNone, err
|
||||
}
|
||||
|
||||
// Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
|
||||
// prevents those differences from appearing in the diff and therefore in the patch.
|
||||
err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj)
|
||||
if err != nil {
|
||||
return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
|
||||
}
|
||||
|
||||
// Normalize to avoid diffing on unimportant differences.
|
||||
normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
|
||||
obj.Spec = *argo.NormalizeApplicationSpec(&obj.Spec)
|
||||
|
||||
equality := conversion.EqualitiesOrDie(
|
||||
func(a, b resource.Quantity) bool {
|
||||
// Ignore formatting, only care that numeric value stayed the same.
|
||||
@@ -79,16 +94,34 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
|
||||
},
|
||||
)
|
||||
|
||||
if equality.DeepEqual(existing, obj) {
|
||||
if equality.DeepEqual(normalizedLive, obj) {
|
||||
return controllerutil.OperationResultNone, nil
|
||||
}
|
||||
|
||||
if err := c.Patch(ctx, obj, client.MergeFrom(existing)); err != nil {
|
||||
patch := client.MergeFrom(normalizedLive)
|
||||
if log.IsLevelEnabled(log.DebugLevel) {
|
||||
LogPatch(logCtx, patch, obj)
|
||||
}
|
||||
if err := c.Patch(ctx, obj, patch); err != nil {
|
||||
return controllerutil.OperationResultNone, err
|
||||
}
|
||||
return controllerutil.OperationResultUpdated, nil
|
||||
}
|
||||
|
||||
func LogPatch(logCtx *log.Entry, patch client.Patch, obj *argov1alpha1.Application) {
|
||||
patchBytes, err := patch.Data(obj)
|
||||
if err != nil {
|
||||
logCtx.Errorf("failed to generate patch: %v", err)
|
||||
}
|
||||
// Get the patch as a plain object so it is easier to work with in json logs.
|
||||
var patchObj map[string]interface{}
|
||||
err = json.Unmarshal(patchBytes, &patchObj)
|
||||
if err != nil {
|
||||
logCtx.Errorf("failed to unmarshal patch: %v", err)
|
||||
}
|
||||
logCtx.WithField("patch", patchObj).Debug("patching application")
|
||||
}
|
||||
|
||||
// mutate wraps a MutateFn and applies validation to its result
|
||||
func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object) error {
|
||||
if err := f(); err != nil {
|
||||
@@ -99,3 +132,71 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
|
||||
func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application) error {
|
||||
if len(applicationSetIgnoreDifferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
generatedAppCopy := generatedApp.DeepCopy()
|
||||
diffConfig, err := argodiff.NewDiffConfigBuilder().
|
||||
WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
|
||||
WithNoCache().
|
||||
Build()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to build diff config: %w", err)
|
||||
}
|
||||
unstructuredFound, err := appToUnstructured(found)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
|
||||
}
|
||||
unstructuredGenerated, err := appToUnstructured(generatedApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
|
||||
}
|
||||
result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to normalize application spec: %w", err)
|
||||
}
|
||||
if len(result.Lives) != 1 {
|
||||
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Lives))
|
||||
}
|
||||
foundJsonNormalized, err := json.Marshal(result.Lives[0].Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
|
||||
}
|
||||
foundNormalized := &argov1alpha1.Application{}
|
||||
err = json.Unmarshal(foundJsonNormalized, &foundNormalized)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal normalized app to json: %w", err)
|
||||
}
|
||||
if len(result.Targets) != 1 {
|
||||
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
|
||||
}
|
||||
foundNormalized.DeepCopyInto(found)
|
||||
generatedJsonNormalized, err := json.Marshal(result.Targets[0].Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
|
||||
}
|
||||
generatedAppNormalized := &argov1alpha1.Application{}
|
||||
err = json.Unmarshal(generatedJsonNormalized, &generatedAppNormalized)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
|
||||
}
|
||||
generatedAppNormalized.DeepCopyInto(generatedApp)
|
||||
// Prohibit jq queries from mutating silly things.
|
||||
generatedApp.TypeMeta = generatedAppCopy.TypeMeta
|
||||
generatedApp.Name = generatedAppCopy.Name
|
||||
generatedApp.Namespace = generatedAppCopy.Namespace
|
||||
generatedApp.Operation = generatedAppCopy.Operation
|
||||
return nil
|
||||
}
|
||||
|
||||
func appToUnstructured(app client.Object) (*unstructured.Unstructured, error) {
|
||||
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
|
||||
}
|
||||
return &unstructured.Unstructured{Object: u}, nil
|
||||
}
|
||||
|
||||
234
applicationset/utils/createOrUpdate_test.go
Normal file
234
applicationset/utils/createOrUpdate_test.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func Test_applyIgnoreDifferences(t *testing.T) {
|
||||
appMeta := metav1.TypeMeta{
|
||||
APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
|
||||
Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
|
||||
foundApp string
|
||||
generatedApp string
|
||||
expectedApp string
|
||||
}{
|
||||
{
|
||||
name: "empty ignoreDifferences",
|
||||
foundApp: `
|
||||
spec: {}`,
|
||||
generatedApp: `
|
||||
spec: {}`,
|
||||
expectedApp: `
|
||||
spec: {}`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
|
||||
name: "ignore target revision with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.source.targetRevision"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
source:
|
||||
targetRevision: foo`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
source:
|
||||
targetRevision: bar`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
source:
|
||||
targetRevision: foo`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
|
||||
name: "ignore helm parameter with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
source:
|
||||
helm:
|
||||
parameters:
|
||||
- name: image.tag
|
||||
value: test
|
||||
- name: another
|
||||
value: value`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
source:
|
||||
helm:
|
||||
parameters:
|
||||
- name: image.tag
|
||||
value: v1.0.0
|
||||
- name: another
|
||||
value: value`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
source:
|
||||
helm:
|
||||
parameters:
|
||||
- name: image.tag
|
||||
value: test
|
||||
- name: another
|
||||
value: value`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
|
||||
name: "ignore auto-sync in appset when it's not in the cluster with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
retry:
|
||||
limit: 5`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
retry:
|
||||
limit: 5`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
retry:
|
||||
limit: 5`,
|
||||
},
|
||||
{
|
||||
name: "ignore auto-sync in the cluster when it's not in the appset with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
retry:
|
||||
limit: 5`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
retry:
|
||||
limit: 5`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
retry:
|
||||
limit: 5`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
|
||||
name: "ignore a one-off annotation with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
|
||||
},
|
||||
foundApp: `
|
||||
metadata:
|
||||
annotations:
|
||||
foo.bar: baz
|
||||
some.other: annotation`,
|
||||
generatedApp: `
|
||||
metadata:
|
||||
annotations:
|
||||
some.other: annotation`,
|
||||
expectedApp: `
|
||||
metadata:
|
||||
annotations:
|
||||
foo.bar: baz
|
||||
some.other: annotation`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
|
||||
name: "ignore the source.plugin field with a json pointer",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JSONPointers: []string{"/spec/source/plugin"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
source:
|
||||
plugin:
|
||||
parameters:
|
||||
- name: url
|
||||
string: https://example.com`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
source:
|
||||
plugin:
|
||||
parameters:
|
||||
- name: url
|
||||
string: https://example.com/wrong`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
source:
|
||||
plugin:
|
||||
parameters:
|
||||
- name: url
|
||||
string: https://example.com`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
|
||||
name: "ignore parameters added to a multi-source app in the cluster",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/test-org/test-repo
|
||||
helm:
|
||||
parameters:
|
||||
- name: test
|
||||
value: hi`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/test-org/test-repo`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/test-org/test-repo
|
||||
helm:
|
||||
parameters:
|
||||
- name: test
|
||||
value: hi`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
foundApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
|
||||
require.NoError(t, err, tc.foundApp)
|
||||
generatedApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
|
||||
require.NoError(t, err, tc.generatedApp)
|
||||
err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp)
|
||||
require.NoError(t, err)
|
||||
yamlFound, err := yaml.Marshal(tc.foundApp)
|
||||
require.NoError(t, err)
|
||||
yamlExpected, err := yaml.Marshal(tc.expectedApp)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(yamlExpected), string(yamlFound))
|
||||
})
|
||||
}
|
||||
}
|
||||
71
applicationset/utils/template_functions.go
Normal file
71
applicationset/utils/template_functions.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// SanitizeName sanitizes the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func SanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofited in sprig
|
||||
// toYAML takes an interface, marshals it to yaml, and returns a string. It will
|
||||
// always return a string, even on marshal error (empty string).
|
||||
//
|
||||
// This is designed to be called from a template.
|
||||
func toYAML(v interface{}) (string, error) {
|
||||
data, err := yaml.Marshal(v)
|
||||
if err != nil {
|
||||
// Swallow errors inside of a template.
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSuffix(string(data), "\n"), nil
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofited in sprig
|
||||
// fromYAML converts a YAML document into a map[string]interface{}.
|
||||
//
|
||||
// This is not a general-purpose YAML parser, and will not parse all valid
|
||||
// YAML documents. Additionally, because its intended use is within templates
|
||||
// it tolerates errors. It will insert the returned error message string into
|
||||
// m["Error"] in the returned map.
|
||||
func fromYAML(str string) (map[string]interface{}, error) {
|
||||
m := map[string]interface{}{}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(str), &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofited in sprig
|
||||
// fromYAMLArray converts a YAML array into a []interface{}.
|
||||
//
|
||||
// This is not a general-purpose YAML parser, and will not parse all valid
|
||||
// YAML documents. Additionally, because its intended use is within templates
|
||||
// it tolerates errors. It will insert the returned error message string as
|
||||
// the first and only item in the returned array.
|
||||
func fromYAMLArray(str string) ([]interface{}, error) {
|
||||
a := []interface{}{}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(str), &a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
@@ -32,6 +32,9 @@ func init() {
|
||||
delete(sprigFuncMap, "expandenv")
|
||||
delete(sprigFuncMap, "getHostByName")
|
||||
sprigFuncMap["normalize"] = SanitizeName
|
||||
sprigFuncMap["toYaml"] = toYAML
|
||||
sprigFuncMap["fromYaml"] = fromYAML
|
||||
sprigFuncMap["fromYamlArray"] = fromYAMLArray
|
||||
}
|
||||
|
||||
type Renderer interface {
|
||||
@@ -431,23 +434,6 @@ func NormalizeBitbucketBasePath(basePath string) string {
|
||||
return basePath
|
||||
}
|
||||
|
||||
// SanitizeName sanitizes the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func SanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
|
||||
|
||||
func getTlsConfigWithCACert(scmRootCAPath string) *tls.Config {
|
||||
|
||||
tlsConfig := &tls.Config{}
|
||||
|
||||
@@ -555,6 +555,64 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
templateOptions: []string{"missingkey=error"},
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
},
|
||||
{
|
||||
name: "toYaml",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
params: map[string]interface{}{
|
||||
"foo": map[string]interface{}{
|
||||
"bar": map[string]interface{}{
|
||||
"bool": true,
|
||||
"number": 2,
|
||||
"str": "Hello world",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "toYaml Error",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: :1:3: executing \"\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
params: map[string]interface{}{
|
||||
"foo": func(test *string) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYaml",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
params: map[string]interface{}{
|
||||
"value": "hello: world",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYaml error",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
params: map[string]interface{}{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYamlArray",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
params: map[string]interface{}{
|
||||
"value": "- hello world\n- bonjour tout le monde",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYamlArray error",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
params: map[string]interface{}{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
85
applicationset/webhook/testdata/azuredevops-pull-request.json
vendored
Normal file
85
applicationset/webhook/testdata/azuredevops-pull-request.json
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
{
|
||||
"id": "2ab4e3d3-b7a6-425e-92b1-5a9982c1269e",
|
||||
"eventType": "git.pullrequest.created",
|
||||
"publisherId": "tfs",
|
||||
"scope": "all",
|
||||
"message": {
|
||||
"text": "Jamal Hartnett created a new pull request",
|
||||
"html": "Jamal Hartnett created a new pull request",
|
||||
"markdown": "Jamal Hartnett created a new pull request"
|
||||
},
|
||||
"detailedMessage": {
|
||||
"text": "Jamal Hartnett created a new pull request\r\n\r\n- Merge status: Succeeded\r\n- Merge commit: eef717(https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72)\r\n",
|
||||
"html": "Jamal Hartnett created a new pull request\r\n<ul>\r\n<li>Merge status: Succeeded</li>\r\n<li>Merge commit: <a href=\"https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72\">eef717</a></li>\r\n</ul>",
|
||||
"markdown": "Jamal Hartnett created a new pull request\r\n\r\n+ Merge status: Succeeded\r\n+ Merge commit: [eef717](https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72)\r\n"
|
||||
},
|
||||
"resource": {
|
||||
"repository": {
|
||||
"id": "4bc14d40-c903-45e2-872e-0462c7748079",
|
||||
"name": "Fabrikam",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079",
|
||||
"project": {
|
||||
"id": "6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"name": "DefaultCollection",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/projects/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"state": "wellFormed"
|
||||
},
|
||||
"defaultBranch": "refs/heads/master",
|
||||
"remoteUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_git/Fabrikam"
|
||||
},
|
||||
"pullRequestId": 1,
|
||||
"status": "active",
|
||||
"createdBy": {
|
||||
"id": "54d125f7-69f7-4191-904f-c5b96b6261c8",
|
||||
"displayName": "Jamal Hartnett",
|
||||
"uniqueName": "fabrikamfiber4@hotmail.com",
|
||||
"url": "https://vssps.dev.azure.com/fabrikam/_apis/Identities/54d125f7-69f7-4191-904f-c5b96b6261c8",
|
||||
"imageUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_api/_common/identityImage?id=54d125f7-69f7-4191-904f-c5b96b6261c8"
|
||||
},
|
||||
"creationDate": "2014-06-17T16:55:46.589889Z",
|
||||
"title": "my first pull request",
|
||||
"description": " - test2\r\n",
|
||||
"sourceRefName": "refs/heads/mytopic",
|
||||
"targetRefName": "refs/heads/master",
|
||||
"mergeStatus": "succeeded",
|
||||
"mergeId": "a10bb228-6ba6-4362-abd7-49ea21333dbd",
|
||||
"lastMergeSourceCommit": {
|
||||
"commitId": "53d54ac915144006c2c9e90d2c7d3880920db49c",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/53d54ac915144006c2c9e90d2c7d3880920db49c"
|
||||
},
|
||||
"lastMergeTargetCommit": {
|
||||
"commitId": "a511f535b1ea495ee0c903badb68fbc83772c882",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/a511f535b1ea495ee0c903badb68fbc83772c882"
|
||||
},
|
||||
"lastMergeCommit": {
|
||||
"commitId": "eef717f69257a6333f221566c1c987dc94cc0d72",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72"
|
||||
},
|
||||
"reviewers": [
|
||||
{
|
||||
"reviewerUrl": null,
|
||||
"vote": 0,
|
||||
"id": "2ea2d095-48f9-4cd6-9966-62f6f574096c",
|
||||
"displayName": "[Mobile]\\Mobile Team",
|
||||
"uniqueName": "vstfs:///Classification/TeamProject/f0811a3b-8c8a-4e43-a3bf-9a049b4835bd\\Mobile Team",
|
||||
"url": "https://vssps.dev.azure.com/fabrikam/_apis/Identities/2ea2d095-48f9-4cd6-9966-62f6f574096c",
|
||||
"imageUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_api/_common/identityImage?id=2ea2d095-48f9-4cd6-9966-62f6f574096c",
|
||||
"isContainer": true
|
||||
}
|
||||
],
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/pullRequests/1"
|
||||
},
|
||||
"resourceVersion": "1.0",
|
||||
"resourceContainers": {
|
||||
"collection": {
|
||||
"id": "c12d0eb8-e382-443b-9f9c-c52cba5014c2"
|
||||
},
|
||||
"account": {
|
||||
"id": "f844ec47-a9db-4511-8281-8b63f4eaf94e"
|
||||
},
|
||||
"project": {
|
||||
"id": "be9b3917-87e6-42a4-a549-2bc06a7a878f"
|
||||
}
|
||||
},
|
||||
"createdDate": "2016-09-19T13:03:27.2879096Z"
|
||||
}
|
||||
76
applicationset/webhook/testdata/azuredevops-push.json
vendored
Normal file
76
applicationset/webhook/testdata/azuredevops-push.json
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
{
|
||||
"id": "03c164c2-8912-4d5e-8009-3707d5f83734",
|
||||
"eventType": "git.push",
|
||||
"publisherId": "tfs",
|
||||
"scope": "all",
|
||||
"message": {
|
||||
"text": "Jamal Hartnett pushed updates to branch master of repository Fabrikam-Fiber-Git.",
|
||||
"html": "Jamal Hartnett pushed updates to branch master of repository Fabrikam-Fiber-Git.",
|
||||
"markdown": "Jamal Hartnett pushed updates to branch `master` of repository `Fabrikam-Fiber-Git`."
|
||||
},
|
||||
"detailedMessage": {
|
||||
"text": "Jamal Hartnett pushed 1 commit to branch master of repository Fabrikam-Fiber-Git.\n - Fixed bug in web.config file 33b55f7c",
|
||||
"html": "Jamal Hartnett pushed 1 commit to branch <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/#version=GBmaster\">master</a> of repository <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/\">Fabrikam-Fiber-Git</a>.\n<ul>\n<li>Fixed bug in web.config file <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74\">33b55f7c</a>\n</ul>",
|
||||
"markdown": "Jamal Hartnett pushed 1 commit to branch [master](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/#version=GBmaster) of repository [Fabrikam-Fiber-Git](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/).\n* Fixed bug in web.config file [33b55f7c](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74)"
|
||||
},
|
||||
"resource": {
|
||||
"commits": [
|
||||
{
|
||||
"commitId": "33b55f7cb7e7e245323987634f960cf4a6e6bc74",
|
||||
"author": {
|
||||
"name": "Jamal Hartnett",
|
||||
"email": "fabrikamfiber4@hotmail.com",
|
||||
"date": "2015-02-25T19:01:00Z"
|
||||
},
|
||||
"committer": {
|
||||
"name": "Jamal Hartnett",
|
||||
"email": "fabrikamfiber4@hotmail.com",
|
||||
"date": "2015-02-25T19:01:00Z"
|
||||
},
|
||||
"comment": "Fixed bug in web.config file",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74"
|
||||
}
|
||||
],
|
||||
"refUpdates": [
|
||||
{
|
||||
"name": "refs/heads/master",
|
||||
"oldObjectId": "aad331d8d3b131fa9ae03cf5e53965b51942618a",
|
||||
"newObjectId": "33b55f7cb7e7e245323987634f960cf4a6e6bc74"
|
||||
}
|
||||
],
|
||||
"repository": {
|
||||
"id": "278d5cd2-584d-4b63-824a-2ba458937249",
|
||||
"name": "Fabrikam-Fiber-Git",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/repos/git/repositories/278d5cd2-584d-4b63-824a-2ba458937249",
|
||||
"project": {
|
||||
"id": "6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"name": "DefaultCollection",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/projects/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"state": "wellFormed"
|
||||
},
|
||||
"defaultBranch": "refs/heads/master",
|
||||
"remoteUrl": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"
|
||||
},
|
||||
"pushedBy": {
|
||||
"id": "00067FFED5C7AF52@Live.com",
|
||||
"displayName": "Jamal Hartnett",
|
||||
"uniqueName": "Windows Live ID\\fabrikamfiber4@hotmail.com"
|
||||
},
|
||||
"pushId": 14,
|
||||
"date": "2014-05-02T19:17:13.3309587Z",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/repos/git/repositories/278d5cd2-584d-4b63-824a-2ba458937249/pushes/14"
|
||||
},
|
||||
"resourceVersion": "1.0",
|
||||
"resourceContainers": {
|
||||
"collection": {
|
||||
"id": "c12d0eb8-e382-443b-9f9c-c52cba5014c2"
|
||||
},
|
||||
"account": {
|
||||
"id": "f844ec47-a9db-4511-8281-8b63f4eaf94e"
|
||||
},
|
||||
"project": {
|
||||
"id": "be9b3917-87e6-42a4-a549-2bc06a7a878f"
|
||||
}
|
||||
},
|
||||
"createdDate": "2016-09-19T13:03:27.0379153Z"
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package webhook
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
@@ -19,17 +20,24 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
"github.com/go-playground/webhooks/v6/azuredevops"
|
||||
"github.com/go-playground/webhooks/v6/github"
|
||||
"github.com/go-playground/webhooks/v6/gitlab"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
errBasicAuthVerificationFailed = errors.New("basic auth verification failed")
|
||||
)
|
||||
|
||||
type WebhookHandler struct {
|
||||
namespace string
|
||||
github *github.Webhook
|
||||
gitlab *gitlab.Webhook
|
||||
client client.Client
|
||||
generators map[string]generators.Generator
|
||||
namespace string
|
||||
github *github.Webhook
|
||||
gitlab *gitlab.Webhook
|
||||
azuredevops *azuredevops.Webhook
|
||||
azuredevopsAuthHandler func(r *http.Request) error
|
||||
client client.Client
|
||||
generators map[string]generators.Generator
|
||||
}
|
||||
|
||||
type gitGeneratorInfo struct {
|
||||
@@ -39,8 +47,14 @@ type gitGeneratorInfo struct {
|
||||
}
|
||||
|
||||
type prGeneratorInfo struct {
|
||||
Github *prGeneratorGithubInfo
|
||||
Gitlab *prGeneratorGitlabInfo
|
||||
Azuredevops *prGeneratorAzuredevopsInfo
|
||||
Github *prGeneratorGithubInfo
|
||||
Gitlab *prGeneratorGitlabInfo
|
||||
}
|
||||
|
||||
type prGeneratorAzuredevopsInfo struct {
|
||||
Repo string
|
||||
Project string
|
||||
}
|
||||
|
||||
type prGeneratorGithubInfo struct {
|
||||
@@ -68,13 +82,28 @@ func NewWebhookHandler(namespace string, argocdSettingsMgr *argosettings.Setting
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to init GitLab webhook: %v", err)
|
||||
}
|
||||
azuredevopsHandler, err := azuredevops.New()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to init Azure DevOps webhook: %v", err)
|
||||
}
|
||||
azuredevopsAuthHandler := func(r *http.Request) error {
|
||||
if argocdSettings.WebhookAzureDevOpsUsername != "" && argocdSettings.WebhookAzureDevOpsPassword != "" {
|
||||
username, password, ok := r.BasicAuth()
|
||||
if !ok || username != argocdSettings.WebhookAzureDevOpsUsername || password != argocdSettings.WebhookAzureDevOpsPassword {
|
||||
return errBasicAuthVerificationFailed
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return &WebhookHandler{
|
||||
namespace: namespace,
|
||||
github: githubHandler,
|
||||
gitlab: gitlabHandler,
|
||||
client: client,
|
||||
generators: generators,
|
||||
namespace: namespace,
|
||||
github: githubHandler,
|
||||
gitlab: gitlabHandler,
|
||||
azuredevops: azuredevopsHandler,
|
||||
azuredevopsAuthHandler: azuredevopsAuthHandler,
|
||||
client: client,
|
||||
generators: generators,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -125,6 +154,14 @@ func (h *WebhookHandler) Handler(w http.ResponseWriter, r *http.Request) {
|
||||
payload, err = h.github.Parse(r, github.PushEvent, github.PullRequestEvent, github.PingEvent)
|
||||
case r.Header.Get("X-Gitlab-Event") != "":
|
||||
payload, err = h.gitlab.Parse(r, gitlab.PushEvents, gitlab.TagEvents, gitlab.MergeRequestEvents)
|
||||
case r.Header.Get("X-Vss-Activityid") != "":
|
||||
if err = h.azuredevopsAuthHandler(r); err != nil {
|
||||
if errors.Is(err, errBasicAuthVerificationFailed) {
|
||||
log.WithField(common.SecurityField, common.SecurityHigh).Infof("Azure DevOps webhook basic auth verification failed")
|
||||
}
|
||||
} else {
|
||||
payload, err = h.azuredevops.Parse(r, azuredevops.GitPushEventType, azuredevops.GitPullRequestCreatedEventType, azuredevops.GitPullRequestUpdatedEventType, azuredevops.GitPullRequestMergedEventType)
|
||||
}
|
||||
default:
|
||||
log.Debug("Ignoring unknown webhook event")
|
||||
http.Error(w, "Unknown webhook event", http.StatusBadRequest)
|
||||
@@ -164,6 +201,12 @@ func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo {
|
||||
webURL = payload.Project.WebURL
|
||||
revision = parseRevision(payload.Ref)
|
||||
touchedHead = payload.Project.DefaultBranch == revision
|
||||
case azuredevops.GitPushEvent:
|
||||
// See: https://learn.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#git.push
|
||||
webURL = payload.Resource.Repository.RemoteURL
|
||||
revision = parseRevision(payload.Resource.RefUpdates[0].Name)
|
||||
touchedHead = payload.Resource.RefUpdates[0].Name == payload.Resource.Repository.DefaultBranch
|
||||
// unfortunately, Azure DevOps doesn't provide a list of changed files
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -229,6 +272,18 @@ func getPRGeneratorInfo(payload interface{}) *prGeneratorInfo {
|
||||
Project: strconv.FormatInt(payload.ObjectAttributes.TargetProjectID, 10),
|
||||
APIHostname: urlObj.Hostname(),
|
||||
}
|
||||
case azuredevops.GitPullRequestEvent:
|
||||
if !isAllowedAzureDevOpsPullRequestAction(string(payload.EventType)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
repo := payload.Resource.Repository.Name
|
||||
project := payload.Resource.Repository.Project.Name
|
||||
|
||||
info.Azuredevops = &prGeneratorAzuredevopsInfo{
|
||||
Repo: repo,
|
||||
Project: project,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -256,6 +311,13 @@ var gitlabAllowedPullRequestActions = []string{
|
||||
"merge",
|
||||
}
|
||||
|
||||
// azuredevopsAllowedPullRequestActions is a list of Azure DevOps actions that allow refresh
|
||||
var azuredevopsAllowedPullRequestActions = []string{
|
||||
"git.pullrequest.created",
|
||||
"git.pullrequest.merged",
|
||||
"git.pullrequest.updated",
|
||||
}
|
||||
|
||||
func isAllowedGithubPullRequestAction(action string) bool {
|
||||
for _, allow := range githubAllowedPullRequestActions {
|
||||
if allow == action {
|
||||
@@ -274,6 +336,15 @@ func isAllowedGitlabPullRequestAction(action string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func isAllowedAzureDevOpsPullRequestAction(action string) bool {
|
||||
for _, allow := range azuredevopsAllowedPullRequestActions {
|
||||
if allow == action {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldRefreshGitGenerator(gen *v1alpha1.GitGenerator, info *gitGeneratorInfo) bool {
|
||||
if gen == nil || info == nil {
|
||||
return false
|
||||
@@ -359,6 +430,16 @@ func shouldRefreshPRGenerator(gen *v1alpha1.PullRequestGenerator, info *prGenera
|
||||
return true
|
||||
}
|
||||
|
||||
if gen.AzureDevOps != nil && info.Azuredevops != nil {
|
||||
if gen.AzureDevOps.Project != info.Azuredevops.Project {
|
||||
return false
|
||||
}
|
||||
if gen.AzureDevOps.Repo != info.Azuredevops.Repo {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -146,6 +146,24 @@ func TestWebhookHandler(t *testing.T) {
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: false,
|
||||
},
|
||||
{
|
||||
desc: "WebHook from a Azure DevOps repository via Commit",
|
||||
headerKey: "X-Vss-Activityid",
|
||||
headerValue: "Push Hook",
|
||||
payloadFile: "azuredevops-push.json",
|
||||
effectedAppSets: []string{"git-azure-devops", "plugin", "matrix-pull-request-github-plugin"},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: true,
|
||||
},
|
||||
{
|
||||
desc: "WebHook from a Azure DevOps repository via pull request event",
|
||||
headerKey: "X-Vss-Activityid",
|
||||
headerValue: "Pull Request Hook",
|
||||
payloadFile: "azuredevops-pull-request.json",
|
||||
effectedAppSets: []string{"pull-request-azure-devops", "plugin", "matrix-pull-request-github-plugin"},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: true,
|
||||
},
|
||||
}
|
||||
|
||||
namespace := "test"
|
||||
@@ -161,8 +179,10 @@ func TestWebhookHandler(t *testing.T) {
|
||||
fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
|
||||
fakeAppWithGitGenerator("git-github", namespace, "https://github.com/org/repo"),
|
||||
fakeAppWithGitGenerator("git-gitlab", namespace, "https://gitlab/group/name"),
|
||||
fakeAppWithGitGenerator("git-azure-devops", namespace, "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"),
|
||||
fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "Codertocat", "Hello-World"),
|
||||
fakeAppWithGitlabPullRequestGenerator("pull-request-gitlab", namespace, "100500"),
|
||||
fakeAppWithAzureDevOpsPullRequestGenerator("pull-request-azure-devops", namespace, "DefaultCollection", "Fabrikam"),
|
||||
fakeAppWithPluginGenerator("plugin", namespace),
|
||||
fakeAppWithMatrixAndGitGenerator("matrix-git-github", namespace, "https://github.com/org/repo"),
|
||||
fakeAppWithMatrixAndPullRequestGenerator("matrix-pull-request-github", namespace, "Codertocat", "Hello-World"),
|
||||
@@ -338,6 +358,27 @@ func fakeAppWithGithubPullRequestGenerator(name, namespace, owner, repo string)
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAppWithAzureDevOpsPullRequestGenerator(name, namespace, project, repo string) *v1alpha1.ApplicationSet {
|
||||
return &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
PullRequest: &v1alpha1.PullRequestGenerator{
|
||||
AzureDevOps: &v1alpha1.PullRequestGeneratorAzureDevOps{
|
||||
Project: project,
|
||||
Repo: repo,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAppWithMatrixAndGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
|
||||
return &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
||||
@@ -6055,6 +6055,30 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetResourceIgnoreDifferences": {
|
||||
"description": "ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live\napplications when applying changes from generated applications.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"jqPathExpressions": {
|
||||
"description": "JQPathExpressions is a list of JQ path expressions to fields to ignore differences for.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"jsonPointers": {
|
||||
"description": "JSONPointers is a list of JSON pointers to fields to ignore differences for.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetRolloutStep": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -6103,6 +6127,12 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"ignoreApplicationDifferences": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetResourceIgnoreDifferences"
|
||||
}
|
||||
},
|
||||
"preservedFields": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationPreservedFields"
|
||||
},
|
||||
@@ -6393,6 +6423,13 @@
|
||||
"type": "string",
|
||||
"title": "Namespace sets the namespace that Kustomize adds to all resources"
|
||||
},
|
||||
"patches": {
|
||||
"type": "array",
|
||||
"title": "Patches is a list of Kustomize patches",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizePatch"
|
||||
}
|
||||
},
|
||||
"replicas": {
|
||||
"type": "array",
|
||||
"title": "Replicas is a list of Kustomize Replicas override specifications",
|
||||
@@ -7265,6 +7302,20 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeGvk": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeOptions": {
|
||||
"type": "object",
|
||||
"title": "KustomizeOptions are options for kustomize to use when building manifests",
|
||||
@@ -7279,6 +7330,26 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizePatch": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"options": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"patch": {
|
||||
"type": "string"
|
||||
},
|
||||
"path": {
|
||||
"type": "string"
|
||||
},
|
||||
"target": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeSelector"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeReplica": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -7291,6 +7362,34 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeResId": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"gvk": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeGvk"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeSelector": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotationSelector": {
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"type": "string"
|
||||
},
|
||||
"resId": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeResId"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ListGenerator": {
|
||||
"type": "object",
|
||||
"title": "ListGenerator include items info",
|
||||
|
||||
@@ -30,11 +30,13 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
"github.com/argoproj/argo-cd/v2/util/tls"
|
||||
"github.com/argoproj/argo-cd/v2/util/trace"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// CLIName is the name of the CLI
|
||||
cliName = "argocd-application-controller"
|
||||
cliName = common.ApplicationController
|
||||
// Default time in seconds for application resync period
|
||||
defaultAppResyncPeriod = 180
|
||||
// Default time in seconds for application hard resync period
|
||||
@@ -43,28 +45,29 @@ const (
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSource func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
repoServerPlaintext bool
|
||||
repoServerStrictTLS bool
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
applicationNamespaces []string
|
||||
persistResourceHealth bool
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSource func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
repoServerPlaintext bool
|
||||
repoServerStrictTLS bool
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
applicationNamespaces []string
|
||||
persistResourceHealth bool
|
||||
shardingAlgorithm string
|
||||
enableDynamicClusterDistribution bool
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -92,7 +95,7 @@ func NewCommand() *cobra.Command {
|
||||
config, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
|
||||
config.UserAgent = fmt.Sprintf("argocd-application-controller/%s (%s)", vers.Version, vers.Platform)
|
||||
config.UserAgent = fmt.Sprintf("%s/%s (%s)", common.DefaultApplicationControllerName, vers.Version, vers.Platform)
|
||||
|
||||
kubeClient := kubernetes.NewForConfigOrDie(config)
|
||||
appClient := appclientset.NewForConfigOrDie(config)
|
||||
@@ -137,7 +140,8 @@ func NewCommand() *cobra.Command {
|
||||
appController.InvalidateProjectsCache()
|
||||
}))
|
||||
kubectl := kubeutil.NewKubectl()
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm)
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
|
||||
errors.CheckError(err)
|
||||
appController, err = controller.NewApplicationController(
|
||||
namespace,
|
||||
settingsMgr,
|
||||
@@ -201,27 +205,62 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from")
|
||||
command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD")
|
||||
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
|
||||
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string) sharding.ClusterFilterFunction {
|
||||
replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
|
||||
|
||||
var replicas int
|
||||
shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
|
||||
|
||||
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
|
||||
|
||||
// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
|
||||
if err != nil && kubeerrors.IsNotFound(err) {
|
||||
appControllerDeployment = nil
|
||||
}
|
||||
|
||||
if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
|
||||
replicas = int(*appControllerDeployment.Spec.Replicas)
|
||||
} else {
|
||||
replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
}
|
||||
|
||||
var clusterFilter func(cluster *v1alpha1.Cluster) bool
|
||||
if replicas > 1 {
|
||||
if shard < 0 {
|
||||
// check for shard mapping using configmap if application-controller is a deployment
|
||||
// else use existing logic to infer shard from pod name if application-controller is a statefulset
|
||||
if enableDynamicClusterDistribution && appControllerDeployment != nil {
|
||||
|
||||
var err error
|
||||
shard, err = sharding.InferShard()
|
||||
// retry 3 times if we find a conflict while updating shard mapping configMap.
|
||||
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
|
||||
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
|
||||
shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
|
||||
if !kubeerrors.IsConflict(err) {
|
||||
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
|
||||
break
|
||||
}
|
||||
log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
if shard < 0 {
|
||||
var err error
|
||||
shard, err = sharding.InferShard()
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
log.Infof("Processing clusters from shard %d", shard)
|
||||
db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
|
||||
log.Infof("Using filter function: %s", shardingAlgorithm)
|
||||
distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
|
||||
clusterFilter = sharding.GetClusterFilter(distributionFunction, shard)
|
||||
clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
|
||||
} else {
|
||||
log.Info("Processing all cluster shards")
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@ func NewCommand() *cobra.Command {
|
||||
argocdRepoServerStrictTLS bool
|
||||
configMapName string
|
||||
secretName string
|
||||
applicationNamespaces []string
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: "controller",
|
||||
@@ -138,7 +139,7 @@ func NewCommand() *cobra.Command {
|
||||
log.Infof("serving metrics on port %d", metricsPort)
|
||||
log.Infof("loading configuration %d", metricsPort)
|
||||
|
||||
ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, appLabelSelector, registry, secretName, configMapName)
|
||||
ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, applicationNamespaces, appLabelSelector, registry, secretName, configMapName)
|
||||
err = ctrl.Init(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize controller: %w", err)
|
||||
@@ -161,5 +162,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
|
||||
command.Flags().StringVar(&secretName, "secret-name", "argocd-notifications-secret", "Set notifications Secret name")
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that this controller should send notifications for")
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -269,7 +269,6 @@ func parentChildDetails(appIf application.ApplicationServiceClient, ctx context.
|
||||
errors.CheckError(err)
|
||||
|
||||
for _, node := range resourceTree.Nodes {
|
||||
|
||||
mapUidToNode[node.UID] = node
|
||||
|
||||
if len(node.ParentRefs) > 0 {
|
||||
@@ -282,7 +281,6 @@ func parentChildDetails(appIf application.ApplicationServiceClient, ctx context.
|
||||
} else {
|
||||
parentNode[node.UID] = struct{}{}
|
||||
}
|
||||
|
||||
}
|
||||
return mapUidToNode, mapParentToChild, parentNode
|
||||
}
|
||||
@@ -316,13 +314,11 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
showParams bool
|
||||
showOperation bool
|
||||
)
|
||||
|
||||
var command = &cobra.Command{
|
||||
Use: "get APPNAME",
|
||||
Short: "Get application details",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
output, _ = c.Flags().GetString("output")
|
||||
if len(args) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -362,22 +358,14 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
}
|
||||
case "tree":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildDetails(appIf, ctx, appName, appNs)
|
||||
mapNodeNameToResourceState := make(map[string]*resourceState)
|
||||
for _, res := range getResourceStates(app, nil) {
|
||||
mapNodeNameToResourceState[res.Kind+"/"+res.Name] = res
|
||||
}
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
}
|
||||
case "tree=detailed":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildDetails(appIf, ctx, appName, appNs)
|
||||
mapNodeNameToResourceState := make(map[string]*resourceState)
|
||||
for _, res := range getResourceStates(app, nil) {
|
||||
mapNodeNameToResourceState[res.Kind+"/"+res.Name] = res
|
||||
}
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeViewDetailed(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
@@ -1506,6 +1494,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
timeout uint
|
||||
selector string
|
||||
resources []string
|
||||
output string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "wait [APPNAME.. | -l selector]",
|
||||
@@ -1554,7 +1543,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
for _, appName := range appNames {
|
||||
_, _, err := waitOnApplicationStatus(ctx, acdClient, appName, timeout, watch, selectedResources)
|
||||
_, _, err := waitOnApplicationStatus(ctx, acdClient, appName, timeout, watch, selectedResources, output)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
},
|
||||
@@ -1567,6 +1556,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
|
||||
command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -1622,6 +1612,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
diffChanges bool
|
||||
diffChangesConfirm bool
|
||||
projects []string
|
||||
output string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "sync [APPNAME... | -l selector | --project project-name]",
|
||||
@@ -1650,17 +1641,13 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
argocd app sync my-app --resource argoproj.io:Rollout:my-namespace/my-rollout`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) == 0 && selector == "" && len(projects) == 0 {
|
||||
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(args) > 1 && selector != "" {
|
||||
log.Fatal("Cannot use selector option when application name(s) passed as argument(s)")
|
||||
}
|
||||
|
||||
acdClient := headless.NewClientOrDie(clientOpts, c)
|
||||
conn, appIf := acdClient.NewApplicationClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
@@ -1705,6 +1692,8 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("The name of the app is ", appName)
|
||||
|
||||
for _, mfst := range res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
errors.CheckError(err)
|
||||
@@ -1864,7 +1853,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
errors.CheckError(err)
|
||||
|
||||
if !async {
|
||||
app, opState, err := waitOnApplicationStatus(ctx, acdClient, appQualifiedName, timeout, watchOpts{operation: true}, selectedResources)
|
||||
app, opState, err := waitOnApplicationStatus(ctx, acdClient, appQualifiedName, timeout, watchOpts{operation: true}, selectedResources, output)
|
||||
errors.CheckError(err)
|
||||
|
||||
if !dryRun {
|
||||
@@ -1905,6 +1894,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&diffChangesConfirm, "assumeYes", false, "Assume yes as answer for all user queries or prompts")
|
||||
command.Flags().BoolVar(&diffChanges, "preview-changes", false, "Preview difference against the target and live state before syncing app and wait for user confirmation")
|
||||
command.Flags().StringArrayVar(&projects, "project", []string{}, "Sync apps that belong to the specified projects. This option may be specified repeatedly.")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -2085,12 +2075,26 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string
|
||||
return synced && healthCheckPassed && operational
|
||||
}
|
||||
|
||||
// resourceParentChild gets the latest state of the app and the latest state of the app's resource tree and then
|
||||
// constructs the necessary data structures to print the app as a tree.
|
||||
func resourceParentChild(ctx context.Context, acdClient argocdclient.Client, appName string, appNs string) (map[string]argoappv1.ResourceNode, map[string][]string, map[string]struct{}, map[string]*resourceState) {
|
||||
_, appIf := acdClient.NewApplicationClientOrDie()
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildDetails(appIf, ctx, appName, appNs)
|
||||
app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: pointer.String(appName), AppNamespace: pointer.String(appNs)})
|
||||
errors.CheckError(err)
|
||||
mapNodeNameToResourceState := make(map[string]*resourceState)
|
||||
for _, res := range getResourceStates(app, nil) {
|
||||
mapNodeNameToResourceState[res.Kind+"/"+res.Name] = res
|
||||
}
|
||||
return mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState
|
||||
}
|
||||
|
||||
const waitFormatString = "%s\t%5s\t%10s\t%10s\t%20s\t%8s\t%7s\t%10s\t%s\n"
|
||||
|
||||
// waitOnApplicationStatus watches an application and blocks until either the desired watch conditions
|
||||
// are fulfiled or we reach the timeout. Returns the app once desired conditions have been filled.
|
||||
// are fulfilled or we reach the timeout. Returns the app once desired conditions have been filled.
|
||||
// Additionally return the operationState at time of fulfilment (which may be different than returned app).
|
||||
func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client, appName string, timeout uint, watch watchOpts, selectedResources []*argoappv1.SyncOperationResource) (*argoappv1.Application, *argoappv1.OperationState, error) {
|
||||
func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client, appName string, timeout uint, watch watchOpts, selectedResources []*argoappv1.SyncOperationResource, output string) (*argoappv1.Application, *argoappv1.OperationState, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
@@ -2122,18 +2126,49 @@ func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client,
|
||||
printOperationResult(app.Status.OperationState)
|
||||
}
|
||||
|
||||
if len(app.Status.Resources) > 0 {
|
||||
fmt.Println()
|
||||
w := tabwriter.NewWriter(os.Stdout, 5, 0, 2, ' ', 0)
|
||||
printAppResources(w, app)
|
||||
_ = w.Flush()
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
err := PrintResource(app, output)
|
||||
errors.CheckError(err)
|
||||
case "wide", "":
|
||||
if len(app.Status.Resources) > 0 {
|
||||
fmt.Println()
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
printAppResources(w, app)
|
||||
_ = w.Flush()
|
||||
}
|
||||
case "tree":
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
}
|
||||
case "tree=detailed":
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeViewDetailed(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
}
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
if timeout != 0 {
|
||||
time.AfterFunc(time.Duration(timeout)*time.Second, func() {
|
||||
_, appClient := acdClient.NewApplicationClientOrDie()
|
||||
app, err := appClient.Get(ctx, &application.ApplicationQuery{
|
||||
Name: &appRealName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
fmt.Println()
|
||||
fmt.Println("This is the state of the app after `wait` timed out:")
|
||||
printFinalStatus(app)
|
||||
cancel()
|
||||
fmt.Println()
|
||||
fmt.Println("The command timed out waiting for the conditions to be met.")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2341,13 +2376,13 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
var (
|
||||
prune bool
|
||||
timeout uint
|
||||
output string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "rollback APPNAME [ID]",
|
||||
Short: "Rollback application to a previous deployed version by History ID, omitted will Rollback to the previous version",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -2381,12 +2416,13 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
|
||||
_, _, err = waitOnApplicationStatus(ctx, acdClient, app.QualifiedName(), timeout, watchOpts{
|
||||
operation: true,
|
||||
}, nil)
|
||||
}, nil, output)
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -255,7 +255,6 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
|
||||
Short: "List resource of application",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
output, _ = c.Flags().GetString("output")
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
|
||||
@@ -142,7 +142,10 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
|
||||
}
|
||||
defer io.Close(closer)
|
||||
_, err = versionClient.Version(ctx, &empty.Empty{})
|
||||
return fmt.Errorf("failed to get version: %w", err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get version: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartLocalServer allows executing command in a headless mode: on the fly starts Argo CD API server and
|
||||
@@ -243,7 +246,10 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return fmt.Errorf("all retries failed: %w", err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("all retries failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewClientOrDie creates a new API client from a set of config options, or fails fatally if the new client creation fails.
|
||||
|
||||
@@ -27,11 +27,12 @@ func extractHealthStatusAndReason(node v1alpha1.ResourceNode) (healthStatus heal
|
||||
}
|
||||
|
||||
func treeViewAppGet(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentToChildMap map[string][]string, parent v1alpha1.ResourceNode, mapNodeNameToResourceState map[string]*resourceState, w *tabwriter.Writer) {
|
||||
healthStatus, _ := extractHealthStatusAndReason(parent)
|
||||
if mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] != nil {
|
||||
value := mapNodeNameToResourceState[parent.Kind+"/"+parent.Name]
|
||||
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+value.Name, value.Status, value.Health, value.Message)
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+parent.Name, "", "", "")
|
||||
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+parent.Name, "", healthStatus, "")
|
||||
}
|
||||
chs := parentToChildMap[parent.UID]
|
||||
for i, childUid := range chs {
|
||||
|
||||
@@ -120,11 +120,16 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
|
||||
logCtx.Error(err.Error())
|
||||
return strings.TrimSuffix(output, "\n"), err
|
||||
}
|
||||
|
||||
logCtx = logCtx.WithFields(log.Fields{
|
||||
"stderr": stderr.String(),
|
||||
"command": command,
|
||||
})
|
||||
if len(output) == 0 {
|
||||
log.WithFields(log.Fields{
|
||||
"stderr": stderr.String(),
|
||||
"command": command,
|
||||
}).Warn("Plugin command returned zero output")
|
||||
logCtx.Warn("Plugin command returned zero output")
|
||||
} else {
|
||||
// Log stderr even on successfull commands to help develop plugins
|
||||
logCtx.Info("Plugin command successfull")
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(output, "\n"), nil
|
||||
|
||||
@@ -12,6 +12,11 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Component names
|
||||
const (
|
||||
ApplicationController = "argocd-application-controller"
|
||||
)
|
||||
|
||||
// Default service addresses and URLS of Argo CD internal services
|
||||
const (
|
||||
// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
|
||||
@@ -34,6 +39,8 @@ const (
|
||||
// ArgoCDTLSCertsConfigMapName contains TLS certificate data for connecting repositories. Will get mounted as volume to pods
|
||||
ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm"
|
||||
ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm"
|
||||
// ArgoCDAppControllerShardConfigMapName contains the application controller to shard mapping
|
||||
ArgoCDAppControllerShardConfigMapName = "argocd-app-controller-shard-cm"
|
||||
)
|
||||
|
||||
// Some default configurables
|
||||
@@ -109,6 +116,8 @@ const (
|
||||
// RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution accross all shards
|
||||
RoundRobinShardingAlgorithm = "round-robin"
|
||||
DefaultShardingAlgorithm = LegacyShardingAlgorithm
|
||||
// AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
|
||||
AppControllerHeartbeatUpdateRetryCount = 3
|
||||
)
|
||||
|
||||
// Dex related constants
|
||||
@@ -209,10 +218,14 @@ const (
|
||||
EnvPauseGenerationRequests = "ARGOCD_PAUSE_GEN_REQUESTS"
|
||||
// EnvControllerReplicas is the number of controller replicas
|
||||
EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS"
|
||||
// EnvControllerHeartbeatTime will update the heartbeat for application controller to claim shard
|
||||
EnvControllerHeartbeatTime = "ARGOCD_CONTROLLER_HEARTBEAT_TIME"
|
||||
// EnvControllerShard is the shard number that should be handled by controller
|
||||
EnvControllerShard = "ARGOCD_CONTROLLER_SHARD"
|
||||
// EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin
|
||||
EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM"
|
||||
//EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA)
|
||||
EnvEnableDynamicClusterDistribution = "ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION"
|
||||
// EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection
|
||||
EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM"
|
||||
// EnvGithubAppCredsExpirationDuration controls the caching of Github app credentials. This value is in minutes (default: 60)
|
||||
|
||||
@@ -3,6 +3,7 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
goerrors "errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
@@ -34,6 +35,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/informers"
|
||||
informerv1 "k8s.io/client-go/informers/apps/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
@@ -51,6 +54,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
@@ -59,10 +63,12 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/helm"
|
||||
logutils "github.com/argoproj/argo-cd/v2/util/log"
|
||||
settings_util "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
updateOperationStateTimeout = 1 * time.Second
|
||||
updateOperationStateTimeout = 1 * time.Second
|
||||
defaultDeploymentInformerResyncDuration = 10 * time.Second
|
||||
// orphanedIndex contains application which monitor orphaned resources by namespace
|
||||
orphanedIndex = "orphaned"
|
||||
)
|
||||
@@ -105,6 +111,7 @@ type ApplicationController struct {
|
||||
appInformer cache.SharedIndexInformer
|
||||
appLister applisters.ApplicationLister
|
||||
projInformer cache.SharedIndexInformer
|
||||
deploymentInformer informerv1.DeploymentInformer
|
||||
appStateManager AppStateManager
|
||||
stateCache statecache.LiveStateCache
|
||||
statusRefreshTimeout time.Duration
|
||||
@@ -160,7 +167,7 @@ func NewApplicationController(
|
||||
statusHardRefreshTimeout: appHardResyncPeriod,
|
||||
refreshRequestedApps: make(map[string]CompareWith),
|
||||
refreshRequestedAppsMutex: &sync.Mutex{},
|
||||
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"),
|
||||
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
|
||||
settingsMgr: settingsMgr,
|
||||
selfHealTimeout: selfHealTimeout,
|
||||
clusterFilter: clusterFilter,
|
||||
@@ -201,11 +208,35 @@ func NewApplicationController(
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
|
||||
deploymentInformer := factory.Apps().V1().Deployments()
|
||||
|
||||
readinessHealthCheck := func(r *http.Request) error {
|
||||
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
|
||||
if err != nil {
|
||||
if kubeerrors.IsNotFound(err) {
|
||||
appControllerDeployment = nil
|
||||
} else {
|
||||
return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
|
||||
}
|
||||
}
|
||||
if appControllerDeployment != nil {
|
||||
if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
|
||||
return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
|
||||
}
|
||||
shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
|
||||
if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
|
||||
return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
|
||||
var err error
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, func(r *http.Request) error {
|
||||
return nil
|
||||
}, metricsApplicationLabels)
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -220,6 +251,7 @@ func NewApplicationController(
|
||||
ctrl.appInformer = appInformer
|
||||
ctrl.appLister = appLister
|
||||
ctrl.projInformer = projInformer
|
||||
ctrl.deploymentInformer = deploymentInformer
|
||||
ctrl.appStateManager = appStateManager
|
||||
ctrl.stateCache = stateCache
|
||||
|
||||
@@ -724,6 +756,7 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
|
||||
|
||||
go ctrl.appInformer.Run(ctx.Done())
|
||||
go ctrl.projInformer.Run(ctx.Done())
|
||||
go ctrl.deploymentInformer.Informer().Run(ctx.Done())
|
||||
|
||||
errors.CheckError(ctrl.stateCache.Init())
|
||||
|
||||
@@ -1326,6 +1359,8 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
|
||||
patchMs := time.Duration(0) // time spent in doing patch/update calls
|
||||
setOpMs := time.Duration(0) // time spent in doing Operation patch calls in autosync
|
||||
appKey, shutdown := ctrl.appRefreshQueue.Get()
|
||||
if shutdown {
|
||||
processNext = false
|
||||
@@ -1367,6 +1402,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
|
||||
logCtx.WithFields(log.Fields{
|
||||
"time_ms": reconcileDuration.Milliseconds(),
|
||||
"patch_ms": patchMs.Milliseconds(),
|
||||
"setop_ms": setOpMs.Milliseconds(),
|
||||
"level": comparisonLevel,
|
||||
"dest-server": origApp.Spec.Destination.Server,
|
||||
"dest-name": origApp.Spec.Destination.Name,
|
||||
@@ -1388,7 +1425,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
}
|
||||
}
|
||||
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1397,7 +1434,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
if hasErrors {
|
||||
app.Status.Sync.Status = appv1.SyncStatusCodeUnknown
|
||||
app.Status.Health.Status = health.HealthStatusUnknown
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
|
||||
|
||||
if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), &appv1.ApplicationTree{}); err != nil {
|
||||
log.Warnf("failed to set app resource tree: %v", err)
|
||||
@@ -1459,7 +1496,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
}
|
||||
|
||||
if project.Spec.SyncWindows.Matches(app).CanSync(false) {
|
||||
syncErrCond := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
|
||||
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
|
||||
setOpMs = opMS
|
||||
if syncErrCond != nil {
|
||||
app.Status.SetConditions(
|
||||
[]appv1.ApplicationCondition{*syncErrCond},
|
||||
@@ -1487,7 +1525,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
app.Status.SourceType = compareResult.appSourceType
|
||||
app.Status.SourceTypes = compareResult.appSourceTypes
|
||||
app.Status.ControllerNamespace = ctrl.namespace
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1602,7 +1640,7 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica
|
||||
}
|
||||
|
||||
// persistAppStatus persists updates to application status. If no changes were made, it is a no-op
|
||||
func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) {
|
||||
func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) (patchMs time.Duration) {
|
||||
logCtx := log.WithFields(log.Fields{"application": orig.QualifiedName()})
|
||||
if orig.Status.Sync.Status != newStatus.Sync.Status {
|
||||
message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status)
|
||||
@@ -1631,6 +1669,11 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
logCtx.Infof("No status changes. Skipping patch")
|
||||
return
|
||||
}
|
||||
// calculate time for path call
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
patchMs = time.Since(start)
|
||||
}()
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
@@ -1638,29 +1681,30 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
} else {
|
||||
logCtx.Infof("Update successful")
|
||||
}
|
||||
return patchMs
|
||||
}
|
||||
|
||||
// autoSync will initiate a sync operation for an application configured with automated sync
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) *appv1.ApplicationCondition {
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) (*appv1.ApplicationCondition, time.Duration) {
|
||||
if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
if app.Operation != nil {
|
||||
logCtx.Infof("Skipping auto-sync: another operation is in progress")
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
if app.DeletionTimestamp != nil && !app.DeletionTimestamp.IsZero() {
|
||||
logCtx.Infof("Skipping auto-sync: deletion in progress")
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
// Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting
|
||||
// a sync when application is already in a Synced or Unknown state
|
||||
if syncStatus.Status != appv1.SyncStatusCodeOutOfSync {
|
||||
logCtx.Infof("Skipping auto-sync: application status is %s", syncStatus.Status)
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
if !app.Spec.SyncPolicy.Automated.Prune {
|
||||
@@ -1673,7 +1717,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
if requirePruneOnly {
|
||||
logCtx.Infof("Skipping auto-sync: need to prune extra resources only but automated prune is disabled")
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1702,10 +1746,10 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
if !attemptPhase.Successful() {
|
||||
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
|
||||
message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
|
||||
return nil
|
||||
return nil, 0
|
||||
} else if alreadyAttempted && selfHeal {
|
||||
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
|
||||
for _, resource := range resources {
|
||||
@@ -1720,7 +1764,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
} else {
|
||||
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1735,19 +1779,29 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
if bAllNeedPrune {
|
||||
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
|
||||
logCtx.Warnf(message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
}
|
||||
|
||||
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
start := time.Now()
|
||||
_, err := argo.SetAppOperation(appIf, app.Name, &op)
|
||||
setOpTime := time.Since(start)
|
||||
if err != nil {
|
||||
if goerrors.Is(err, argo.ErrAnotherOperationInProgress) {
|
||||
// skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
|
||||
// it is safe to skip auto-sync because it is already running
|
||||
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
|
||||
}
|
||||
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
|
||||
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "")
|
||||
logCtx.Info(message)
|
||||
return nil
|
||||
return nil, setOpTime
|
||||
}
|
||||
|
||||
// alreadyAttemptedSync returns whether the most recent sync was performed against the
|
||||
|
||||
@@ -368,7 +368,7 @@ func TestAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -385,7 +385,7 @@ func TestAutoSyncNotAllowEmpty(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.NotNil(t, cond)
|
||||
}
|
||||
|
||||
@@ -398,7 +398,7 @@ func TestAutoSyncAllowEmpty(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
}
|
||||
|
||||
@@ -412,7 +412,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -427,7 +427,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -443,7 +443,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -460,7 +460,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -486,7 +486,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -500,7 +500,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{
|
||||
{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync, RequiresPruning: true},
|
||||
})
|
||||
assert.Nil(t, cond)
|
||||
@@ -538,7 +538,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
|
||||
Source: *app.Spec.Source.DeepCopy(),
|
||||
},
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -581,7 +581,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
applister "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/git"
|
||||
@@ -260,12 +261,12 @@ func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, server,
|
||||
}
|
||||
|
||||
func (m *MetricsServer) IncRedisRequest(failed bool) {
|
||||
m.redisRequestCounter.WithLabelValues(m.hostname, "argocd-application-controller", strconv.FormatBool(failed)).Inc()
|
||||
m.redisRequestCounter.WithLabelValues(m.hostname, common.ApplicationController, strconv.FormatBool(failed)).Inc()
|
||||
}
|
||||
|
||||
// ObserveRedisRequestDuration observes redis request duration
|
||||
func (m *MetricsServer) ObserveRedisRequestDuration(duration time.Duration) {
|
||||
m.redisRequestHistogram.WithLabelValues(m.hostname, "argocd-application-controller").Observe(duration.Seconds())
|
||||
m.redisRequestHistogram.WithLabelValues(m.hostname, common.ApplicationController).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
// IncReconcile increments the reconcile counter for an application
|
||||
|
||||
@@ -4,32 +4,57 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
log "github.com/sirupsen/logrus"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
// Make it overridable for testing
|
||||
var osHostnameFunction = os.Hostname
|
||||
|
||||
// Make it overridable for testing
|
||||
var heartbeatCurrentTime = metav1.Now
|
||||
|
||||
var (
|
||||
HeartbeatDuration = env.ParseNumFromEnv(common.EnvControllerHeartbeatTime, 10, 10, 60)
|
||||
HeartbeatTimeout = 3 * HeartbeatDuration
|
||||
)
|
||||
|
||||
const ShardControllerMappingKey = "shardControllerMapping"
|
||||
|
||||
type DistributionFunction func(c *v1alpha1.Cluster) int
|
||||
type ClusterFilterFunction func(c *v1alpha1.Cluster) bool
|
||||
|
||||
// shardApplicationControllerMapping stores the mapping of Shard Number to Application Controller in ConfigMap.
|
||||
// It also stores the heartbeat of last synced time of the application controller.
|
||||
type shardApplicationControllerMapping struct {
|
||||
ShardNumber int
|
||||
ControllerName string
|
||||
HeartbeatTime metav1.Time
|
||||
}
|
||||
|
||||
// GetClusterFilter returns a ClusterFilterFunction which is a function taking a cluster as a parameter
|
||||
// and returns wheter or not the cluster should be processed by a given shard. It calls the distributionFunction
|
||||
// to determine which shard will process the cluster, and if the given shard is equal to the calculated shard
|
||||
// the function will return true.
|
||||
func GetClusterFilter(distributionFunction DistributionFunction, shard int) ClusterFilterFunction {
|
||||
replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
func GetClusterFilter(db db.ArgoDB, distributionFunction DistributionFunction, shard int) ClusterFilterFunction {
|
||||
replicas := db.GetApplicationControllerReplicas()
|
||||
return func(c *v1alpha1.Cluster) bool {
|
||||
clusterShard := 0
|
||||
if c != nil && c.Shard != nil {
|
||||
@@ -50,12 +75,12 @@ func GetClusterFilter(distributionFunction DistributionFunction, shard int) Clus
|
||||
// the current datas.
|
||||
func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) DistributionFunction {
|
||||
log.Infof("Using filter function: %s", shardingAlgorithm)
|
||||
distributionFunction := LegacyDistributionFunction()
|
||||
distributionFunction := LegacyDistributionFunction(db)
|
||||
switch shardingAlgorithm {
|
||||
case common.RoundRobinShardingAlgorithm:
|
||||
distributionFunction = RoundRobinDistributionFunction(db)
|
||||
case common.LegacyShardingAlgorithm:
|
||||
distributionFunction = LegacyDistributionFunction()
|
||||
distributionFunction = LegacyDistributionFunction(db)
|
||||
default:
|
||||
log.Warnf("distribution type %s is not supported, defaulting to %s", shardingAlgorithm, common.DefaultShardingAlgorithm)
|
||||
}
|
||||
@@ -67,8 +92,8 @@ func GetDistributionFunction(db db.ArgoDB, shardingAlgorithm string) Distributio
|
||||
// is lightweight and can be distributed easily, however, it does not ensure an homogenous distribution as
|
||||
// some shards may get assigned more clusters than others. It is the legacy function distribution that is
|
||||
// kept for compatibility reasons
|
||||
func LegacyDistributionFunction() DistributionFunction {
|
||||
replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
func LegacyDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
replicas := db.GetApplicationControllerReplicas()
|
||||
return func(c *v1alpha1.Cluster) int {
|
||||
if replicas == 0 {
|
||||
return -1
|
||||
@@ -97,7 +122,7 @@ func LegacyDistributionFunction() DistributionFunction {
|
||||
// clusters +/-1 , but with the drawback of a reshuffling of clusters accross shards in case of some changes
|
||||
// in the cluster list
|
||||
func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
replicas := db.GetApplicationControllerReplicas()
|
||||
return func(c *v1alpha1.Cluster) int {
|
||||
if replicas > 0 {
|
||||
if c == nil { // in-cluster does not necessarly have a secret assigned. So we are receiving a nil cluster here.
|
||||
@@ -123,7 +148,7 @@ func RoundRobinDistributionFunction(db db.ArgoDB) DistributionFunction {
|
||||
func InferShard() (int, error) {
|
||||
hostname, err := osHostnameFunction()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return -1, err
|
||||
}
|
||||
parts := strings.Split(hostname, "-")
|
||||
if len(parts) == 0 {
|
||||
@@ -162,3 +187,167 @@ func createClusterIndexByClusterIdMap(db db.ArgoDB) map[string]int {
|
||||
}
|
||||
return clusterIndexedByClusterId
|
||||
}
|
||||
|
||||
// GetOrUpdateShardFromConfigMap finds the shard number from the shard mapping configmap. If the shard mapping configmap does not exist,
|
||||
// the function creates the shard mapping configmap.
|
||||
// The function takes the shard number from the environment variable (default value -1, if not set) and passes it to this function.
|
||||
// If the shard value passed to this function is -1, that is, the shard was not set as an environment variable,
|
||||
// we default the shard number to 0 for computing the default config map.
|
||||
func GetOrUpdateShardFromConfigMap(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, replicas, shard int) (int, error) {
|
||||
|
||||
hostname, err := osHostnameFunction()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// fetch the shard mapping configMap
|
||||
shardMappingCM, err := kubeClient.CoreV1().ConfigMaps(settingsMgr.GetNamespace()).Get(context.Background(), common.ArgoCDAppControllerShardConfigMapName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
if !kubeerrors.IsNotFound(err) {
|
||||
return -1, fmt.Errorf("error getting sharding config map: %s", err)
|
||||
}
|
||||
log.Infof("shard mapping configmap %s not found. Creating default shard mapping configmap.", common.ArgoCDAppControllerShardConfigMapName)
|
||||
|
||||
// if the shard is not set as an environment variable, set the default value of shard to 0 for generating default CM
|
||||
if shard == -1 {
|
||||
shard = 0
|
||||
}
|
||||
shardMappingCM, err = generateDefaultShardMappingCM(settingsMgr.GetNamespace(), hostname, replicas, shard)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error generating default shard mapping configmap %s", err)
|
||||
}
|
||||
if _, err = kubeClient.CoreV1().ConfigMaps(settingsMgr.GetNamespace()).Create(context.Background(), shardMappingCM, metav1.CreateOptions{}); err != nil {
|
||||
return -1, fmt.Errorf("error creating shard mapping configmap %s", err)
|
||||
}
|
||||
// return 0 as the controller is assigned to shard 0 while generating default shard mapping ConfigMap
|
||||
return shard, nil
|
||||
} else {
|
||||
// Identify the available shard and update the ConfigMap
|
||||
data := shardMappingCM.Data[ShardControllerMappingKey]
|
||||
var shardMappingData []shardApplicationControllerMapping
|
||||
err := json.Unmarshal([]byte(data), &shardMappingData)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error unmarshalling shard config map data: %s", err)
|
||||
}
|
||||
|
||||
shard, shardMappingData := getOrUpdateShardNumberForController(shardMappingData, hostname, replicas, shard)
|
||||
updatedShardMappingData, err := json.Marshal(shardMappingData)
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("error marshalling data of shard mapping ConfigMap: %s", err)
|
||||
}
|
||||
shardMappingCM.Data[ShardControllerMappingKey] = string(updatedShardMappingData)
|
||||
|
||||
_, err = kubeClient.CoreV1().ConfigMaps(settingsMgr.GetNamespace()).Update(context.Background(), shardMappingCM, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return shard, nil
|
||||
}
|
||||
}
|
||||
|
||||
// getOrUpdateShardNumberForController takes list of shardApplicationControllerMapping and performs computation to find the matching or empty shard number
|
||||
func getOrUpdateShardNumberForController(shardMappingData []shardApplicationControllerMapping, hostname string, replicas, shard int) (int, []shardApplicationControllerMapping) {
|
||||
|
||||
// if current length of shardMappingData in shard mapping configMap is less than the number of replicas,
|
||||
// create additional empty entries for missing shard numbers in shardMappingDataconfigMap
|
||||
if len(shardMappingData) < replicas {
|
||||
// generate extra default mappings
|
||||
for currentShard := len(shardMappingData); currentShard < replicas; currentShard++ {
|
||||
shardMappingData = append(shardMappingData, shardApplicationControllerMapping{
|
||||
ShardNumber: currentShard,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// if current length of shardMappingData in shard mapping configMap is more than the number of replicas,
|
||||
// we replace the config map with default config map and let controllers self assign the new shard to itself
|
||||
if len(shardMappingData) > replicas {
|
||||
shardMappingData = getDefaultShardMappingData(replicas)
|
||||
}
|
||||
|
||||
if shard != -1 && shard < replicas {
|
||||
log.Debugf("update heartbeat for shard %d", shard)
|
||||
for i := range shardMappingData {
|
||||
shardMapping := shardMappingData[i]
|
||||
if shardMapping.ShardNumber == shard {
|
||||
log.Debugf("Shard found. Updating heartbeat!!")
|
||||
shardMapping.ControllerName = hostname
|
||||
shardMapping.HeartbeatTime = heartbeatCurrentTime()
|
||||
shardMappingData[i] = shardMapping
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// find the matching shard with assigned controllerName
|
||||
for i := range shardMappingData {
|
||||
shardMapping := shardMappingData[i]
|
||||
if shardMapping.ControllerName == hostname {
|
||||
log.Debugf("Shard matched. Updating heartbeat!!")
|
||||
shard = int(shardMapping.ShardNumber)
|
||||
shardMapping.HeartbeatTime = heartbeatCurrentTime()
|
||||
shardMappingData[i] = shardMapping
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// at this point, we have still not found a shard with matching hostname.
|
||||
// So, find a shard with either no controller assigned or assigned controller
|
||||
// with heartbeat past threshold
|
||||
if shard == -1 {
|
||||
for i := range shardMappingData {
|
||||
shardMapping := shardMappingData[i]
|
||||
if (shardMapping.ControllerName == "") || (metav1.Now().After(shardMapping.HeartbeatTime.Add(time.Duration(HeartbeatTimeout) * time.Second))) {
|
||||
shard = int(shardMapping.ShardNumber)
|
||||
log.Debugf("Empty shard found %d", shard)
|
||||
shardMapping.ControllerName = hostname
|
||||
shardMapping.HeartbeatTime = heartbeatCurrentTime()
|
||||
shardMappingData[i] = shardMapping
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return shard, shardMappingData
|
||||
}
|
||||
|
||||
// generateDefaultShardMappingCM creates a default shard mapping configMap. Assigns current controller to shard 0.
|
||||
func generateDefaultShardMappingCM(namespace, hostname string, replicas, shard int) (*v1.ConfigMap, error) {
|
||||
|
||||
shardingCM := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDAppControllerShardConfigMapName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string]string{},
|
||||
}
|
||||
|
||||
shardMappingData := getDefaultShardMappingData(replicas)
|
||||
|
||||
// if shard is not assigned to a controller, we use shard 0
|
||||
if shard == -1 || shard > replicas {
|
||||
shard = 0
|
||||
}
|
||||
shardMappingData[shard].ControllerName = hostname
|
||||
shardMappingData[shard].HeartbeatTime = heartbeatCurrentTime()
|
||||
|
||||
data, err := json.Marshal(shardMappingData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating default ConfigMap: %s", err)
|
||||
}
|
||||
shardingCM.Data[ShardControllerMappingKey] = string(data)
|
||||
|
||||
return shardingCM, nil
|
||||
}
|
||||
|
||||
func getDefaultShardMappingData(replicas int) []shardApplicationControllerMapping {
|
||||
shardMappingData := make([]shardApplicationControllerMapping, 0)
|
||||
|
||||
for i := 0; i < replicas; i++ {
|
||||
mapping := shardApplicationControllerMapping{
|
||||
ShardNumber: i,
|
||||
}
|
||||
shardMappingData = append(shardMappingData, mapping)
|
||||
}
|
||||
return shardMappingData
|
||||
}
|
||||
|
||||
@@ -1,51 +1,59 @@
|
||||
package sharding
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestGetShardByID_NotEmptyID(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "1")
|
||||
assert.Equal(t, 0, LegacyDistributionFunction()(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction()(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction()(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction()(&v1alpha1.Cluster{ID: "4"}))
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "3"}))
|
||||
assert.Equal(t, 0, LegacyDistributionFunction(db)(&v1alpha1.Cluster{ID: "4"}))
|
||||
}
|
||||
|
||||
func TestGetShardByID_EmptyID(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "1")
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction()(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, 0, shard)
|
||||
}
|
||||
|
||||
func TestGetShardByID_NoReplicas(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "0")
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction()(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
func TestGetShardByID_NoReplicasUsingHashDistributionFunction(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "0")
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
distributionFunction := LegacyDistributionFunction
|
||||
shard := distributionFunction()(&v1alpha1.Cluster{})
|
||||
shard := distributionFunction(db)(&v1alpha1.Cluster{})
|
||||
assert.Equal(t, -1, shard)
|
||||
}
|
||||
|
||||
func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
// Test with replicas set to 0
|
||||
t.Setenv(common.EnvControllerReplicas, "0")
|
||||
db.On("GetApplicationControllerReplicas").Return(0)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
assert.Equal(t, -1, distributionFunction(nil))
|
||||
@@ -59,8 +67,9 @@ func TestGetShardByID_NoReplicasUsingHashDistributionFunctionWithClusters(t *tes
|
||||
func TestGetClusterFilterDefault(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
os.Unsetenv(common.EnvControllerShardingAlgorithm)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
filter := GetClusterFilter(GetDistributionFunction(nil, common.DefaultShardingAlgorithm), shardIndex)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
@@ -69,9 +78,10 @@ func TestGetClusterFilterDefault(t *testing.T) {
|
||||
|
||||
func TestGetClusterFilterLegacy(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
|
||||
filter := GetClusterFilter(GetDistributionFunction(nil, common.LegacyShardingAlgorithm), shardIndex)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
@@ -80,9 +90,10 @@ func TestGetClusterFilterLegacy(t *testing.T) {
|
||||
|
||||
func TestGetClusterFilterUnknown(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "unknown")
|
||||
filter := GetClusterFilter(GetDistributionFunction(nil, "unknown"), shardIndex)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, "unknown"), shardIndex)
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "3"}))
|
||||
@@ -91,8 +102,9 @@ func TestGetClusterFilterUnknown(t *testing.T) {
|
||||
|
||||
func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
filter := GetClusterFilter(GetDistributionFunction(nil, common.DefaultShardingAlgorithm), shardIndex)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(nil))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "1"}))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "2"}))
|
||||
@@ -100,20 +112,19 @@ func TestLegacyGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
assert.True(t, filter(&v1alpha1.Cluster{ID: "4"}))
|
||||
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(GetDistributionFunction(nil, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{ID: "4", Shard: &fixedShard}))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(GetDistributionFunction(nil, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.DefaultShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
|
||||
filter := GetClusterFilter(GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex)
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(nil))
|
||||
assert.False(t, filter(&cluster1))
|
||||
assert.True(t, filter(&cluster2))
|
||||
@@ -123,20 +134,20 @@ func TestRoundRobinGetClusterFilterWithFixedShard(t *testing.T) {
|
||||
// a cluster with a fixed shard should be processed by the specified exact
|
||||
// same shard unless the specified shard index is greater than the number of replicas.
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.RoundRobinShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterLegacyHash(t *testing.T) {
|
||||
shardIndex := 1 // ensuring that a shard with index 1 will process all the clusters with an "even" id (2,4,6,...)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, "hash")
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
filter := GetClusterFilter(GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
filter := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, filter(&cluster1))
|
||||
assert.True(t, filter(&cluster2))
|
||||
assert.False(t, filter(&cluster3))
|
||||
@@ -145,22 +156,22 @@ func TestGetClusterFilterLegacyHash(t *testing.T) {
|
||||
// a cluster with a fixed shard should be processed by the specified exact
|
||||
// same shard unless the specified shard index is greater than the number of replicas.
|
||||
var fixedShard int64 = 4
|
||||
filter = GetClusterFilter(GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
assert.False(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
|
||||
fixedShard = 1
|
||||
filter = GetClusterFilter(GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
filter = GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), int(fixedShard))
|
||||
assert.True(t, filter(&v1alpha1.Cluster{Name: "cluster4", ID: "4", Shard: &fixedShard}))
|
||||
}
|
||||
|
||||
func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, _ := createTestClusters()
|
||||
shardIndex := 1
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
|
||||
t.Run("legacy", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.LegacyShardingAlgorithm)
|
||||
shardShouldProcessCluster := GetClusterFilter(GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, shardShouldProcessCluster(&cluster1))
|
||||
assert.True(t, shardShouldProcessCluster(&cluster2))
|
||||
assert.False(t, shardShouldProcessCluster(&cluster3))
|
||||
@@ -170,7 +181,7 @@ func TestGetClusterFilterWithEnvControllerShardingAlgorithms(t *testing.T) {
|
||||
|
||||
t.Run("roundrobin", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerShardingAlgorithm, common.RoundRobinShardingAlgorithm)
|
||||
shardShouldProcessCluster := GetClusterFilter(GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
shardShouldProcessCluster := GetClusterFilter(db, GetDistributionFunction(db, common.LegacyShardingAlgorithm), shardIndex)
|
||||
assert.False(t, shardShouldProcessCluster(&cluster1))
|
||||
assert.True(t, shardShouldProcessCluster(&cluster2))
|
||||
assert.False(t, shardShouldProcessCluster(&cluster3))
|
||||
@@ -183,7 +194,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
|
||||
db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
|
||||
|
||||
t.Run("replicas set to 1", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "1")
|
||||
db.On("GetApplicationControllerReplicas").Return(1).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
@@ -194,7 +205,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("replicas set to 2", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db.On("GetApplicationControllerReplicas").Return(2).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
@@ -205,7 +216,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunction2(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("replicas set to 3", func(t *testing.T) {
|
||||
t.Setenv(common.EnvControllerReplicas, "3")
|
||||
db.On("GetApplicationControllerReplicas").Return(3).Once()
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
@@ -229,7 +240,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterNumber
|
||||
clusterList.Items = append(clusterList.Items, cluster)
|
||||
}
|
||||
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
distributionFunction := RoundRobinDistributionFunction(&db)
|
||||
for i, c := range clusterList.Items {
|
||||
assert.Equal(t, i%2, distributionFunction(&c))
|
||||
@@ -249,7 +260,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
|
||||
db.On("ListClusters", mock.Anything).Return(clusterList, nil)
|
||||
|
||||
// Test with replicas set to 2
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
distributionFunction := RoundRobinDistributionFunction(&db)
|
||||
assert.Equal(t, 0, distributionFunction(nil))
|
||||
assert.Equal(t, 0, distributionFunction(&cluster1))
|
||||
@@ -270,7 +281,7 @@ func TestGetShardByIndexModuloReplicasCountDistributionFunctionWhenClusterIsAdde
|
||||
|
||||
func TestGetShardByIndexModuloReplicasCountDistributionFunction(t *testing.T) {
|
||||
db, cluster1, cluster2, _, _, _ := createTestClusters()
|
||||
t.Setenv(common.EnvControllerReplicas, "2")
|
||||
db.On("GetApplicationControllerReplicas").Return(2)
|
||||
distributionFunction := RoundRobinDistributionFunction(db)
|
||||
|
||||
// Test that the function returns the correct shard for cluster1 and cluster2
|
||||
@@ -291,8 +302,8 @@ func TestInferShard(t *testing.T) {
|
||||
// Override the os.Hostname function to return a specific hostname for testing
|
||||
defer func() { osHostnameFunction = os.Hostname }()
|
||||
|
||||
osHostnameFunction = func() (string, error) { return "example-shard-3", nil }
|
||||
expectedShard := 3
|
||||
osHostnameFunction = func() (string, error) { return "example-shard-3", nil }
|
||||
actualShard, _ := InferShard()
|
||||
assert.Equal(t, expectedShard, actualShard)
|
||||
|
||||
@@ -333,3 +344,335 @@ func createCluster(name string, id string) v1alpha1.Cluster {
|
||||
}
|
||||
return cluster
|
||||
}
|
||||
|
||||
func Test_getDefaultShardMappingData(t *testing.T) {
|
||||
expectedData := []shardApplicationControllerMapping{
|
||||
{
|
||||
ShardNumber: 0,
|
||||
ControllerName: "",
|
||||
}, {
|
||||
ShardNumber: 1,
|
||||
ControllerName: "",
|
||||
},
|
||||
}
|
||||
|
||||
shardMappingData := getDefaultShardMappingData(2)
|
||||
assert.Equal(t, expectedData, shardMappingData)
|
||||
}
|
||||
|
||||
func Test_generateDefaultShardMappingCM_NoPredefinedShard(t *testing.T) {
|
||||
replicas := 2
|
||||
expectedTime := metav1.Now()
|
||||
defer func() { osHostnameFunction = os.Hostname }()
|
||||
defer func() { heartbeatCurrentTime = metav1.Now }()
|
||||
|
||||
expectedMapping := []shardApplicationControllerMapping{
|
||||
{
|
||||
ShardNumber: 0,
|
||||
ControllerName: "test-example",
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ShardNumber: 1,
|
||||
},
|
||||
}
|
||||
|
||||
expectedMappingCM, err := json.Marshal(expectedMapping)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedShadingCM := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDAppControllerShardConfigMapName,
|
||||
Namespace: "test",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"shardControllerMapping": string(expectedMappingCM),
|
||||
},
|
||||
}
|
||||
heartbeatCurrentTime = func() metav1.Time { return expectedTime }
|
||||
osHostnameFunction = func() (string, error) { return "test-example", nil }
|
||||
shardingCM, err := generateDefaultShardMappingCM("test", "test-example", replicas, -1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedShadingCM, shardingCM)
|
||||
|
||||
}
|
||||
|
||||
func Test_generateDefaultShardMappingCM_PredefinedShard(t *testing.T) {
|
||||
replicas := 2
|
||||
expectedTime := metav1.Now()
|
||||
defer func() { osHostnameFunction = os.Hostname }()
|
||||
defer func() { heartbeatCurrentTime = metav1.Now }()
|
||||
|
||||
expectedMapping := []shardApplicationControllerMapping{
|
||||
{
|
||||
ShardNumber: 0,
|
||||
}, {
|
||||
ShardNumber: 1,
|
||||
ControllerName: "test-example",
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
}
|
||||
|
||||
expectedMappingCM, err := json.Marshal(expectedMapping)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedShadingCM := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDAppControllerShardConfigMapName,
|
||||
Namespace: "test",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"shardControllerMapping": string(expectedMappingCM),
|
||||
},
|
||||
}
|
||||
heartbeatCurrentTime = func() metav1.Time { return expectedTime }
|
||||
osHostnameFunction = func() (string, error) { return "test-example", nil }
|
||||
shardingCM, err := generateDefaultShardMappingCM("test", "test-example", replicas, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedShadingCM, shardingCM)
|
||||
|
||||
}
|
||||
|
||||
func Test_getOrUpdateShardNumberForController(t *testing.T) {
|
||||
expectedTime := metav1.Now()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
shardApplicationControllerMapping []shardApplicationControllerMapping
|
||||
hostname string
|
||||
replicas int
|
||||
shard int
|
||||
expectedShard int
|
||||
expectedShardMappingData []shardApplicationControllerMapping
|
||||
}{
|
||||
{
|
||||
name: "length of shard mapping less than number of replicas - Existing controller",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
hostname: "test-example",
|
||||
replicas: 2,
|
||||
shard: -1,
|
||||
expectedShard: 0,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "length of shard mapping less than number of replicas - New controller",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
hostname: "test-example-1",
|
||||
replicas: 2,
|
||||
shard: -1,
|
||||
expectedShard: 1,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "length of shard mapping more than number of replicas",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
hostname: "test-example",
|
||||
replicas: 1,
|
||||
shard: -1,
|
||||
expectedShard: 0,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "shard number is pre-specified and length of shard mapping less than number of replicas - Existing controller",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
}, {
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
hostname: "test-example-1",
|
||||
replicas: 2,
|
||||
shard: 1,
|
||||
expectedShard: 1,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "shard number is pre-specified and length of shard mapping less than number of replicas - New controller",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
hostname: "test-example-1",
|
||||
replicas: 2,
|
||||
shard: 1,
|
||||
expectedShard: 1,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "shard number is pre-specified and length of shard mapping more than number of replicas",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-2",
|
||||
ShardNumber: 2,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
hostname: "test-example",
|
||||
replicas: 2,
|
||||
shard: 1,
|
||||
expectedShard: 1,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
}, {
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "updating heartbeat",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
hostname: "test-example-1",
|
||||
replicas: 2,
|
||||
shard: -1,
|
||||
expectedShard: 1,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "updating heartbeat - shard pre-defined",
|
||||
shardApplicationControllerMapping: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: metav1.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
hostname: "test-example-1",
|
||||
replicas: 2,
|
||||
shard: 1,
|
||||
expectedShard: 1,
|
||||
expectedShardMappingData: []shardApplicationControllerMapping{
|
||||
{
|
||||
ControllerName: "test-example",
|
||||
ShardNumber: 0,
|
||||
HeartbeatTime: expectedTime,
|
||||
}, {
|
||||
ControllerName: "test-example-1",
|
||||
ShardNumber: 1,
|
||||
HeartbeatTime: expectedTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
defer func() { osHostnameFunction = os.Hostname }()
|
||||
heartbeatCurrentTime = func() metav1.Time { return expectedTime }
|
||||
shard, shardMappingData := getOrUpdateShardNumberForController(tc.shardApplicationControllerMapping, tc.hostname, tc.replicas, tc.shard)
|
||||
assert.Equal(t, tc.expectedShard, shard)
|
||||
assert.Equal(t, tc.expectedShardMappingData, shardMappingData)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -391,6 +391,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
now := metav1.Now()
|
||||
|
||||
var manifestInfos []*apiclient.ManifestResponse
|
||||
targetNsExists := false
|
||||
|
||||
if len(localManifests) == 0 {
|
||||
// If the length of revisions is not same as the length of sources,
|
||||
@@ -453,6 +454,13 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
LastTransitionTime: &now,
|
||||
})
|
||||
}
|
||||
|
||||
// If we reach this path, this means that a namespace has been both defined in Git, as well in the
|
||||
// application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead
|
||||
// of what is present in managedNamespaceMetadata.
|
||||
if isManagedNamespace(targetObj, app) {
|
||||
targetNsExists = true
|
||||
}
|
||||
}
|
||||
ts.AddCheckpoint("dedup_ms")
|
||||
|
||||
@@ -511,7 +519,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a
|
||||
// namespace which we can compare the live namespace with. For that, we'll do the same as is done in
|
||||
// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.
|
||||
if isManagedNamespace(liveObj, app) {
|
||||
//
|
||||
// targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the
|
||||
// targetObjs array.
|
||||
if isManagedNamespace(liveObj, app) && !targetNsExists {
|
||||
nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}
|
||||
managedNs, err := kubeutil.ToUnstructured(nsSpec)
|
||||
|
||||
|
||||
@@ -89,6 +89,122 @@ func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// TestCompareAppStateNamespaceMetadataDiffers tests comparison when managed namespace metadata differs to live and manifest ns
|
||||
func TestCompareAppStateNamespaceMetadataDiffersToManifest(t *testing.T) {
|
||||
ns := NewNamespace()
|
||||
ns.SetName(test.FakeDestNamespace)
|
||||
ns.SetNamespace(test.FakeDestNamespace)
|
||||
ns.SetAnnotations(map[string]string{"bar": "bat"})
|
||||
|
||||
app := newFakeApp()
|
||||
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
app.Status.OperationState = &argoappv1.OperationState{
|
||||
SyncResult: &argoappv1.SyncOperationResult{},
|
||||
}
|
||||
|
||||
liveNs := ns.DeepCopy()
|
||||
liveNs.SetAnnotations(nil)
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{toJSON(t, liveNs)},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.GetResourceKey(ns): ns,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Len(t, compRes.resources, 1)
|
||||
assert.Len(t, compRes.managedResources, 1)
|
||||
assert.NotNil(t, compRes.diffResultList)
|
||||
assert.Len(t, compRes.diffResultList.Diffs, 1)
|
||||
|
||||
result := NewNamespace()
|
||||
assert.NoError(t, json.Unmarshal(compRes.diffResultList.Diffs[0].PredictedLive, result))
|
||||
|
||||
labels := result.GetLabels()
|
||||
delete(labels, "kubernetes.io/metadata.name")
|
||||
|
||||
assert.Equal(t, map[string]string{}, labels)
|
||||
// Manifests override definitions in managedNamespaceMetadata
|
||||
assert.Equal(t, map[string]string{"bar": "bat"}, result.GetAnnotations())
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// TestCompareAppStateNamespaceMetadata tests comparison when managed namespace metadata differs to live
|
||||
func TestCompareAppStateNamespaceMetadata(t *testing.T) {
|
||||
ns := NewNamespace()
|
||||
ns.SetName(test.FakeDestNamespace)
|
||||
ns.SetNamespace(test.FakeDestNamespace)
|
||||
ns.SetAnnotations(map[string]string{"bar": "bat"})
|
||||
|
||||
app := newFakeApp()
|
||||
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
app.Status.OperationState = &argoappv1.OperationState{
|
||||
SyncResult: &argoappv1.SyncOperationResult{},
|
||||
}
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.GetResourceKey(ns): ns,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Len(t, compRes.resources, 1)
|
||||
assert.Len(t, compRes.managedResources, 1)
|
||||
assert.NotNil(t, compRes.diffResultList)
|
||||
assert.Len(t, compRes.diffResultList.Diffs, 1)
|
||||
|
||||
result := NewNamespace()
|
||||
assert.NoError(t, json.Unmarshal(compRes.diffResultList.Diffs[0].PredictedLive, result))
|
||||
|
||||
labels := result.GetLabels()
|
||||
delete(labels, "kubernetes.io/metadata.name")
|
||||
|
||||
assert.Equal(t, map[string]string{"foo": "bar"}, labels)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true", "bar": "bat", "foo": "bar"}, result.GetAnnotations())
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// TestCompareAppStateNamespaceMetadataIsTheSame tests comparison when managed namespace metadata is the same
|
||||
func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
|
||||
@@ -32,7 +32,7 @@ data:
|
||||
Once the proxy extension is enabled, it can be configured in the main
|
||||
Argo CD configmap ([argocd-cm][2]).
|
||||
|
||||
The example below demonstrate all possible configurations available
|
||||
The example below demonstrates all possible configurations available
|
||||
for proxy extensions:
|
||||
|
||||
```yaml
|
||||
@@ -60,9 +60,11 @@ data:
|
||||
server: https://some-cluster
|
||||
```
|
||||
|
||||
If a the configuration is changed, Argo CD Server will need to be
|
||||
restarted as the proxy handlers are only registered once during the
|
||||
initialization of the server.
|
||||
Note: There is no need to restart Argo CD Server after modifiying the
|
||||
`extension.config` entry in Argo CD configmap. Changes will be
|
||||
automatically applied. A new proxy registry will be built making
|
||||
all new incoming extensions requests (`<argocd-host>/extensions/*`) to
|
||||
respect the new configuration.
|
||||
|
||||
Every configuration entry is explained below:
|
||||
|
||||
|
||||
@@ -71,6 +71,8 @@ We supply a `ClusterRole` and `ClusterRoleBinding` suitable for this purpose in
|
||||
kubectl apply -k examples/k8s-rbac/argocd-server-applications/
|
||||
```
|
||||
|
||||
`argocd-notifications-controller-rbac-clusterrole.yaml` and `argocd-notifications-controller-rbac-clusterrolebinding.yaml` are used to support notifications controller to notify apps in all namespaces.
|
||||
|
||||
!!! note
|
||||
At some later point in time, we may make this cluster role part of the default installation manifests.
|
||||
|
||||
|
||||
@@ -26,3 +26,13 @@ spec:
|
||||
preserveResourcesOnDeletion: false
|
||||
# Alpha feature to determine the order in which ApplicationSet applies changes.
|
||||
strategy:
|
||||
# This field lets you define fields which should be ignored when applying Application resources. This is helpful if you
|
||||
# want to use ApplicationSets to create apps, but also want to allow users to modify those apps without having their
|
||||
# changes overwritten by the ApplicationSet.
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqPathExpressions:
|
||||
- .spec.source.helm.values
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ These settings allow you to exert control over when, and how, changes are made t
|
||||
|
||||
Here are some of the controller settings that may be modified to alter the ApplicationSet controller's resource-handling behaviour.
|
||||
|
||||
### Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
|
||||
## Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
|
||||
|
||||
To prevent the ApplicationSet controller from creating, modifying, or deleting any `Application` resources, you may enable `dry-run` mode. This essentially switches the controller into a "read only" mode, where the controller Reconcile loop will run, but no resources will be modified.
|
||||
|
||||
@@ -14,7 +14,7 @@ To enable dry-run, add `--dryrun true` to the ApplicationSet Deployment's contai
|
||||
|
||||
See 'How to modify ApplicationSet container parameters' below for detailed steps on how to add this parameter to the controller.
|
||||
|
||||
### Managed Applications modification Policies
|
||||
## Managed Applications modification Policies
|
||||
|
||||
The ApplicationSet controller supports a parameter `--policy`, which is specified on launch (within the controller Deployment container), and which restricts what types of modifications will be made to managed Argo CD `Application` resources.
|
||||
|
||||
@@ -41,7 +41,7 @@ If the controller parameter `--policy` is set, it takes precedence on the field
|
||||
|
||||
This does not prevent deletion of Applications if the ApplicationSet is deleted
|
||||
|
||||
#### Controller parameter
|
||||
### Controller parameter
|
||||
|
||||
To allow the ApplicationSet controller to *create* `Application` resources, but prevent any further modification, such as deletion, or modification of Application fields, add this parameter in the ApplicationSet controller:
|
||||
```
|
||||
@@ -59,7 +59,7 @@ spec:
|
||||
applicationsSync: create-only
|
||||
```
|
||||
|
||||
### Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
|
||||
## Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
|
||||
|
||||
To allow the ApplicationSet controller to create or modify `Application` resources, but prevent Applications from being deleted, add the following parameter to the ApplicationSet controller `Deployment`:
|
||||
```
|
||||
@@ -79,7 +79,113 @@ spec:
|
||||
applicationsSync: create-update
|
||||
```
|
||||
|
||||
### Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
## Ignore certain changes to Applications
|
||||
|
||||
The ApplicationSet spec includes an `ignoreApplicationDifferences` field, which allows you to specify which fields of
|
||||
the ApplicationSet should be ignored when comparing Applications.
|
||||
|
||||
The field supports multiple ignore rules. Each ignore rule may specify a list of either `jsonPointers` or
|
||||
`jqPathExpressions` to ignore.
|
||||
|
||||
You may optionally also specify a `name` to apply the ignore rule to a specific Application, or omit the `name` to apply
|
||||
the ignore rule to all Applications.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqPathExpressions:
|
||||
- .spec.source.helm.values
|
||||
```
|
||||
|
||||
### Allow temporarily toggling auto-sync
|
||||
|
||||
One of the most common use cases for ignoring differences is to allow temporarily toggling auto-sync for an Application.
|
||||
|
||||
For example, if you have an ApplicationSet that is configured to automatically sync Applications, you may want to temporarily
|
||||
disable auto-sync for a specific Application. You can do this by adding an ignore rule for the `spec.syncPolicy.automated` field.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/syncPolicy
|
||||
```
|
||||
|
||||
### Limitations of `ignoreApplicationDifferences`
|
||||
|
||||
When an ApplicationSet is reconciled, the controller will compare the ApplicationSet spec with the spec of each Application
|
||||
that it manages. If there are any differences, the controller will generate a patch to update the Application to match the
|
||||
ApplicationSet spec.
|
||||
|
||||
The generated patch is a MergePatch. According to the MergePatch documentation, "existing lists will be completely
|
||||
replaced by new lists" when there is a change to the list.
|
||||
|
||||
This limits the effectiveness of `ignoreApplicationDifferences` when the ignored field is in a list. For example, if you
|
||||
have an application with multiple sources, and you want to ignore changes to the `targetRevision` of one of the sources,
|
||||
changes in other fields or in other sources will cause the entire `sources` list to be replaced, and the `targetRevision`
|
||||
field will be reset to the value defined in the ApplicationSet.
|
||||
|
||||
For example, consider this ApplicationSet:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jqPathExpressions:
|
||||
- .spec.sources[] | select(.repoURL == "https://git.example.com/org/repo1").targetRevision
|
||||
template:
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: main
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
You can freely change the `targetRevision` of the `repo1` source, and the ApplicationSet controller will not overwrite
|
||||
your change.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: fix/bug-123
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
However, if you change the `targetRevision` of the `repo2` source, the ApplicationSet controller will overwrite the entire
|
||||
`sources` field.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: main
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
!!! note
|
||||
[Future improvements](https://github.com/argoproj/argo-cd/issues/15975) to the ApplicationSet controller may
|
||||
eliminate this problem. For example, the `ref` field might be made a merge key, allowing the ApplicationSet
|
||||
controller to generate and use a StrategicMergePatch instead of a MergePatch. You could then target a specific
|
||||
source by `ref`, ignore changes to a field in that source, and changes to other sources would not cause the ignored
|
||||
field to be overwritten.
|
||||
|
||||
## Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
|
||||
By default, when an `Application` resource is deleted by the ApplicationSet controller, all of the child resources of the Application will be deleted as well (such as, all of the Application's `Deployments`, `Services`, etc).
|
||||
|
||||
@@ -96,7 +202,7 @@ spec:
|
||||
More information on the specific behaviour of `preserveResourcesOnDeletion`, and deletion in ApplicationSet controller and Argo CD in general, can be found on the [Application Deletion](Application-Deletion.md) page.
|
||||
|
||||
|
||||
### Prevent an Application's child resources from being modified
|
||||
## Prevent an Application's child resources from being modified
|
||||
|
||||
Changes made to the ApplicationSet will propagate to the Applications managed by the ApplicationSet, and then Argo CD will propagate the Application changes to the underlying cluster resources (as per [Argo CD Integration](Argo-CD-Integration.md)).
|
||||
|
||||
@@ -162,6 +268,11 @@ kubectl apply -n argocd -f install.yaml
|
||||
|
||||
## Preserving changes made to an Applications annotations and labels
|
||||
|
||||
!!! note
|
||||
The same behavior can be achieved on a per-app basis using the [`ignoreApplicationDifferences`](#ignore-certain-changes-to-applications)
|
||||
feature described above. However, preserved fields may be configured globally, a feature that is not yet available
|
||||
for `ignoreApplicationDifferences`.
|
||||
|
||||
It is common practice in Kubernetes to store state in annotations, operators will often make use of this. To allow for this, it is possible to configure a list of annotations that the ApplicationSet should preserve when reconciling.
|
||||
|
||||
For example, imagine that we have an Application created from an ApplicationSet, but a custom annotation and label has since been added (to the Application) that does not exist in the `ApplicationSet` resource:
|
||||
@@ -198,48 +309,17 @@ By default, the Argo CD notifications and the Argo CD refresh type annotations a
|
||||
One can also set global preserved fields for the controller by passing a comma separated list of annotations and labels to
|
||||
`ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS` and `ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS` respectively.
|
||||
|
||||
## Limitations: what isn't supported as of the current release
|
||||
## Debugging unexpected changes to Applications
|
||||
|
||||
Here is a list of commonly requested resource modification features which are not supported as of the current release. This lack of support is *not* necessarily by design; rather these behaviours are documented here to provide clear, concise descriptions of the current state of the feature.
|
||||
When the ApplicationSet controller makes a change to an application, it logs the patch at the debug level. To see these
|
||||
logs, set the log level to debug in the `argocd-cmd-params-cm` ConfigMap in the `argocd` namespace:
|
||||
|
||||
### Limitation: No support for manual edits to individual Applications
|
||||
|
||||
There is currently no way to allow modification of a single child Application of an ApplicationSet, for example, if you wanted to make manual edits to a single Application for debugging/testing purposes.
|
||||
|
||||
For example:
|
||||
|
||||
- Imagine that you have an ApplicationSet that created Applications `app1`, `app2`, and `app3`.
|
||||
- You now want to edit `app3` with `kubectl edit application/app3`, to update one of the `app3`'s fields.
|
||||
- However, as soon as you make edits to `app3` (or any of the individual Applications), they will be immediately reverted by the ApplicationSet reconciler back to the `template`-ized version (by design).
|
||||
|
||||
As of this writing, there is [an issue open](https://github.com/argoproj/applicationset/issues/186) for discussion of this behaviour.
|
||||
|
||||
|
||||
### Limitation: ApplicationSet controller will not selectively ignore changes to individual fields
|
||||
|
||||
Currently, you can only instruct the ApplicationSet controller to ignore changes to Application annotations and labels.
|
||||
|
||||
For example, imagine that we have an Application created from an ApplicationSet, but a user has attempted to add a custom annotation/label (to the Application) that does not exist in the `ApplicationSet` resource:
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
# This label exists only on this Application, and not in
|
||||
# the parent ApplicationSet template:
|
||||
my-custom-label: some-value
|
||||
spec:
|
||||
# (...)
|
||||
name: argocd-cmd-params-cm
|
||||
namespace: argocd
|
||||
data:
|
||||
applicationsetcontroller.log.level: debug
|
||||
```
|
||||
|
||||
As above, the `ApplicationSet` resource does not have a `my-custom-label: some-value` label in the `.spec.template.labels` for the Application.
|
||||
|
||||
Since this field is not in the ApplicationSet template, as soon as a user adds this custom label, it will be immediately reverted (removed) by the ApplicationSet controller.
|
||||
|
||||
If the labels/annotations are not mentioned in appset preserved fields, there is currently no way for disabling or customizing this behaviour.
|
||||
|
||||
To some extent this is by design: the main principle of ApplicationSets is that we maintain a 1-to-many relationship between the ApplicationSet and the Applications that it owns, such that all the Applications necessarily conform to a strict template.
|
||||
|
||||
This provides the advantages of the 'cattle not pets' philosophy of microservice/cloud native application resource management, wherein you don't need to worry about individual Applications differing from each other in subtle ways: they will all necessarily be reconciled to be consistent with the parent template.
|
||||
|
||||
BUT, support exists for preserving changes to Application annotations and labels as documented [above](#preserving-changes-made-to-an-applications-annotations-and-labels).
|
||||
|
||||
@@ -174,6 +174,18 @@ It is also possible to use Sprig functions to construct the path variables manua
|
||||
| `{{path.filenameNormalized}}` | `{{.path.filenameNormalized}}` | `{{normalize .path.filename}}` |
|
||||
| `{{path[N]}}` | `-` | `{{index .path.segments N}}` |
|
||||
|
||||
## Available template functions
|
||||
|
||||
ApplicationSet controller provides:
|
||||
|
||||
- all [sprig](http://masterminds.github.io/sprig/) Go templates function except `env`, `expandenv` and `getHostByName`
|
||||
- `normalize`: sanitizes the input so that it complies with the following rules:
|
||||
1. contains no more than 253 characters
|
||||
2. contains only lowercase alphanumeric characters, '-' or '.'
|
||||
3. starts and ends with an alphanumeric character
|
||||
- `toYaml` / `fromYaml` / `fromYamlArray` helm like functions
|
||||
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Go template usage
|
||||
|
||||
@@ -44,6 +44,7 @@ spec:
|
||||
args: [-c, 'echo "Initializing..."']
|
||||
# The generate command runs in the Application source directory each time manifests are generated. Standard output
|
||||
# must be ONLY valid Kubernetes Objects in either YAML or JSON. A non-zero exit code will fail manifest generation.
|
||||
# To write log messages from the command, write them to stderr, it will always be displayed.
|
||||
# Error output will be sent to the UI, so avoid printing sensitive information (such as secrets).
|
||||
generate:
|
||||
command: [sh, -c]
|
||||
@@ -333,6 +334,7 @@ If you are actively developing a sidecar-installed CMP, keep a few things in min
|
||||
3. CMP errors are cached by the repo-server in Redis. Restarting the repo-server Pod will not clear the cache. Always
|
||||
do a "Hard Refresh" when actively developing a CMP so you have the latest output.
|
||||
4. Verify your sidecar has started properly by viewing the Pod and seeing that two containers are running `kubectl get pod -l app.kubernetes.io/component=repo-server -n argocd`
|
||||
5. Write log message to stderr and set the `--loglevel=info` flag in the sidecar. This will print everything written to stderr, even on successfull command execution.
|
||||
|
||||
|
||||
### Other Common Errors
|
||||
|
||||
@@ -266,7 +266,7 @@ metadata:
|
||||
argocd.argoproj.io/secret-type: repository
|
||||
stringData:
|
||||
type: git
|
||||
repo: https://source.developers.google.com/p/my-google-project/r/my-repo
|
||||
url: https://source.developers.google.com/p/my-google-project/r/my-repo
|
||||
gcpServiceAccountKey: |
|
||||
{
|
||||
"type": "service_account",
|
||||
|
||||
59
docs/operator-manual/dynamic-cluster-distribution.md
Normal file
59
docs/operator-manual/dynamic-cluster-distribution.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# Dynamic Cluster Distribution
|
||||
|
||||
*Current Status: [Alpha][1] (Since v2.9.0)*
|
||||
|
||||
By default, clusters are assigned to shards indefinitely. For users of the default, hash-based sharding algorithm, this
|
||||
static assignment is fine: shards will always be roughly-balanced by the hash-based algorithm. But for users of the
|
||||
[round-robin](high_availability.md#argocd-application-controller) or other custom shard assignment algorithms, this
|
||||
static assignment can lead to unbalanced shards when replicas are added or removed.
|
||||
|
||||
Starting v2.9, Argo CD supports a dynamic cluster distribution feature. When replicas are added or removed, the sharding
|
||||
algorithm is re-run to ensure that the clusters are distributed according to the algorithm. If the algorithm is
|
||||
well-balanced, like round-robin, then the shards will be well-balanced.
|
||||
|
||||
Previously, the shard count was set via the `ARGOCD_CONTROLLER_REPLICAS` environment variable. Changing the environment
|
||||
variable forced a restart of all application controller pods. Now, the shard count is set via the `replicas` field of the deployment,
|
||||
which does not require a restart of the application controller pods.
|
||||
|
||||
## Enabling Dynamic Distribution of Clusters
|
||||
|
||||
This feature is disabled by default while it is in alpha. To enable it, you must set the environment `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to true when running the Application Controller.
|
||||
|
||||
In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize
|
||||
overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. The
|
||||
dynamic distribution code automatically kicks in when the controller is deployed as a Deployment.
|
||||
|
||||
!!! important
|
||||
The use of a Deployment instead of a StatefulSet is an implementation detail which may change in future versions of
|
||||
this feature. Therefore, the directory name of the Kustomize overlay may change as well. Monitor the release notes
|
||||
to avoid issues.
|
||||
|
||||
Note the introduction of new environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME`. The environment variable is explained in [working of Dynamic Distribution Heartbeat Process](#working-of-dynamic-distribution)
|
||||
|
||||
## Working of Dynamic Distribution
|
||||
|
||||
To accomplish runtime distribution of clusters, the Application Controller uses a ConfigMap to associate a controller
|
||||
pod with a shard number and a heartbeat to ensure that controller pods are still alive and handling their shard, in
|
||||
effect, their share of the work.
|
||||
|
||||
The Application Controller will create a new ConfigMap named `argocd-app-controller-shard-cm` to store the Controller <-> Shard mapping. The mapping would look like below for each shard:
|
||||
|
||||
```yaml
|
||||
ShardNumber : 0
|
||||
ControllerName : "argocd-application-controller-hydrxyt"
|
||||
HeartbeatTime : "2009-11-17 20:34:58.651387237 +0000 UTC"
|
||||
```
|
||||
|
||||
* `ControllerName`: Stores the hostname of the Application Controller pod
|
||||
* `ShardNumber` : Stores the shard number managed by the controller pod
|
||||
* `HeartbeatTime`: Stores the last time this heartbeat was updated.
|
||||
|
||||
Controller Shard Mapping is updated in the ConfigMap during each readiness probe check of the pod, that is every 10 seconds (otherwise as configured). The controller will acquire the shard during every iteration of readiness probe check and try to update the ConfigMap with the `HeartbeatTime`. The default `HeartbeatDuration` after which the heartbeat should be updated is `10` seconds. If the ConfigMap was not updated for any controller pod for more than `3 * HeartbeatDuration`, then the readiness probe for the application pod is marked as `Unhealthy`. To increase the default `HeartbeatDuration`, you can set the environment variable `ARGOCD_CONTROLLER_HEARTBEAT_TIME` with the desired value.
|
||||
|
||||
The new sharding mechanism does not monitor the environment variable `ARGOCD_CONTROLLER_REPLICAS` but instead reads the replica count directly from the Application Controller Deployment. The controller identifies the change in the number of replicas by comparing the replica count in the Application Controller Deployment and the number of mappings in the `argocd-app-controller-shard-cm` ConfigMap.
|
||||
|
||||
In the scenario when the number of Application Controller replicas increases, a new entry is added to the list of mappings in the `argocd-app-controller-shard-cm` ConfigMap and the cluster distribution is triggered to re-distribute the clusters.
|
||||
|
||||
In the scenario when the number of Application Controller replicas decreases, the mappings in the `argocd-app-controller-shard-cm` ConfigMap are reset and every controller acquires the shard again thus triggering the re-distribution of the clusters.
|
||||
|
||||
[1]: https://github.com/argoproj/argoproj/blob/master/community/feature-status.md
|
||||
@@ -61,8 +61,7 @@ reconciliation. In this case, we advise to use the preferred resource version in
|
||||
|
||||
* If the controller is managing too many clusters and uses too much memory then you can shard clusters across multiple
|
||||
controller replicas. To enable sharding, increase the number of replicas in `argocd-application-controller` `StatefulSet`
|
||||
and repeat the number of replicas in the `ARGOCD_CONTROLLER_REPLICAS` environment variable. The strategic merge patch below
|
||||
demonstrates changes required to configure two controller replicas.
|
||||
and repeat the number of replicas in the `ARGOCD_CONTROLLER_REPLICAS` environment variable. The strategic merge patch below demonstrates changes required to configure two controller replicas.
|
||||
|
||||
* By default, the controller will update the cluster information every 10 seconds. If there is a problem with your cluster network environment that is causing the update time to take a long time, you can try modifying the environment variable `ARGO_CD_UPDATE_CLUSTER_INFO_TIMEOUT` to increase the timeout (the unit is seconds).
|
||||
|
||||
|
||||
@@ -85,20 +85,7 @@ The Argo CD can be installed using [Helm](https://helm.sh/). The Helm chart is c
|
||||
|
||||
## Supported versions
|
||||
|
||||
Similar to the Kubernetes project, the supported versions of Argo CD at any given point in time are the latest patch releases for the N
|
||||
and N - 1 minor versions.
|
||||
These Argo CD versions are supported on the same versions of Kubernetes that are supported by Kubernetes itself (normally the last 3 released versions).
|
||||
|
||||
Essentially the Argo CD project follows the same support scheme as Kubernetes but for N, N-1 while Kubernetes supports N, N-1, N-2 versions.
|
||||
|
||||
For example, if the latest minor versions of Argo CD are 2.4.3 and 2.3.5, while the supported Kubernetes versions are 1.24, 1.23, and 1.22, then the following combinations are supported:
|
||||
|
||||
* Argo CD 2.4.3 on Kubernetes 1.24
|
||||
* Argo CD 2.4.3 on Kubernetes 1.23
|
||||
* Argo CD 2.4.3 on Kubernetes 1.22
|
||||
* Argo CD 2.3.5 on Kubernetes 1.24
|
||||
* Argo CD 2.3.5 on Kubernetes 1.23
|
||||
* Argo CD 2.3.5 on Kubernetes 1.22
|
||||
For detailed information regarding Argo CD's version support policy, please refer to the [Release Process and Cadence documentation](https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/).
|
||||
|
||||
## Tested versions
|
||||
|
||||
|
||||
@@ -15,58 +15,59 @@ argocd-application-controller [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--app-hard-resync int Time period in seconds for application hard resync.
|
||||
--app-resync int Time period in seconds for application resync. (default 180)
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--gloglevel int Set the glog logging level
|
||||
-h, --help help for argocd-application-controller
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20)
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--metrics-application-labels strings List of Application labels that will be added to the argocd_application_labels metric
|
||||
--metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
|
||||
--metrics-port int Start metrics server on given port (default 8082)
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--operation-processors int Number of application operation processors (default 10)
|
||||
--otlp-address string OpenTelemetry collector address to send traces to
|
||||
--otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
|
||||
--password string Password for basic authentication to the API server
|
||||
--persist-resource-health Enables storing the managed resources health in the Application CRD (default true)
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
--redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
|
||||
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
|
||||
--redis-use-tls Use TLS when connecting to Redis.
|
||||
--redisdb int Redis database.
|
||||
--repo-server string Repo server address. (default "argocd-repo-server:8081")
|
||||
--repo-server-plaintext Disable TLS on connections to repo server
|
||||
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
|
||||
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--sharding-method string Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy")
|
||||
--status-processors int Number of application status processors (default 20)
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
--app-hard-resync int Time period in seconds for application hard resync.
|
||||
--app-resync int Time period in seconds for application resync. (default 180)
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--dynamic-cluster-distribution-enabled Enables dynamic cluster distribution.
|
||||
--gloglevel int Set the glog logging level
|
||||
-h, --help help for argocd-application-controller
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20)
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--metrics-application-labels strings List of Application labels that will be added to the argocd_application_labels metric
|
||||
--metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
|
||||
--metrics-port int Start metrics server on given port (default 8082)
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--operation-processors int Number of application operation processors (default 10)
|
||||
--otlp-address string OpenTelemetry collector address to send traces to
|
||||
--otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
|
||||
--password string Password for basic authentication to the API server
|
||||
--persist-resource-health Enables storing the managed resources health in the Application CRD (default true)
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
--redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
|
||||
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
|
||||
--redis-use-tls Use TLS when connecting to Redis.
|
||||
--redisdb int Redis database.
|
||||
--repo-server string Repo server address. (default "argocd-repo-server:8081")
|
||||
--repo-server-plaintext Disable TLS on connections to repo server
|
||||
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
|
||||
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--sharding-method string Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy")
|
||||
--status-processors int Number of application status processors (default 20)
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
```
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 2.9 | v1.28, v1.27, v1.26, v1.25 |
|
||||
| 2.8 | v1.27, v1.26, v1.25, v1.24 |
|
||||
| 2.7 | v1.26, v1.25, v1.24, v1.23 |
|
||||
| 2.6 | v1.24, v1.23, v1.22 |
|
||||
| 2.5 | v1.24, v1.23, v1.22 |
|
||||
|
||||
|
||||
5
docs/operator-manual/upgrading/2.8-2.9.md
Normal file
5
docs/operator-manual/upgrading/2.8-2.9.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# 2.8 to 2.9
|
||||
|
||||
## Upgraded Kustomize Version
|
||||
|
||||
Note that bundled Kustomize version has been upgraded from 5.1.0 to 5.2.1.
|
||||
@@ -37,6 +37,7 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/<v
|
||||
|
||||
<hr/>
|
||||
|
||||
* [v2.8 to v2.9](./2.8-2.9.md)
|
||||
* [v2.7 to v2.8](./2.7-2.8.md)
|
||||
* [v2.6 to v2.7](./2.6-2.7.md)
|
||||
* [v2.5 to v2.6](./2.5-2.6.md)
|
||||
|
||||
@@ -31,6 +31,7 @@ This proposal is experimental, meaning after trying a single bounty, we will rev
|
||||
|
||||
#### Creating a Bounty
|
||||
A bounty is a special proposal created under `docs/proposals/feature-bounties`.
|
||||
|
||||
* A bounty proposal may only be created by an existing Argo maintainer.
|
||||
* The proposal document must be reviewed in regular maintainer meetings and an invitation for feedback will provide 7-days to comment.
|
||||
* Bounty should have approval with [lazy-consensus](https://community.apache.org/committers/lazyConsensus.html)
|
||||
|
||||
@@ -11,9 +11,10 @@ argocd app rollback APPNAME [ID] [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for rollback
|
||||
--prune Allow deleting unexpected resources
|
||||
--timeout uint Time out after this many seconds
|
||||
-h, --help help for rollback
|
||||
-o, --output string Output format. One of: json|yaml|wide|tree|tree=detailed (default "wide")
|
||||
--prune Allow deleting unexpected resources
|
||||
--timeout uint Time out after this many seconds
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
@@ -48,6 +48,7 @@ argocd app sync [APPNAME... | -l selector | --project project-name] [flags]
|
||||
--label stringArray Sync only specific resources with a label. This option may be specified repeatedly.
|
||||
--local string Path to a local directory. When this flag is present no git queries will be made
|
||||
--local-repo-root string Path to the repository root. Used together with --local allows setting the repository root (default "/")
|
||||
-o, --output string Output format. One of: json|yaml|wide|tree|tree=detailed (default "wide")
|
||||
--preview-changes Preview difference against the target and live state before syncing app and wait for user confirmation
|
||||
--project stringArray Sync apps that belong to the specified projects. This option may be specified repeatedly.
|
||||
--prune Allow deleting unexpected resources
|
||||
|
||||
@@ -42,6 +42,7 @@ argocd app wait [APPNAME.. | -l selector] [flags]
|
||||
--health Wait for health
|
||||
-h, --help help for wait
|
||||
--operation Wait for pending operations
|
||||
-o, --output string Output format. One of: json|yaml|wide|tree|tree=detailed (default "wide")
|
||||
--resource stringArray Sync only specific resources as GROUP:KIND:NAME or !GROUP:KIND:NAME. Fields may be blank and '*' can be used. This option may be specified repeatedly
|
||||
-l, --selector string Wait for apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.
|
||||
--suspended Wait for suspended
|
||||
|
||||
@@ -12,12 +12,64 @@ The following configuration options are available for Kustomize:
|
||||
* `namespace` is a kubernetes resources namespace
|
||||
* `forceCommonAnnotations` is a boolean value which defines if it's allowed to override existing annotations
|
||||
* `commonAnnotationsEnvsubst` is a boolean value which enables environment variable substitution in annotation values
|
||||
* `patches` is a list of Kustomize patches that supports inline updates
|
||||
|
||||
To use Kustomize with an overlay, point your path to the overlay.
|
||||
|
||||
!!! tip
|
||||
If you're generating resources, you should read up how to ignore those generated resources using the [`IgnoreExtraneous` compare option](compare-options.md).
|
||||
|
||||
## Patches
|
||||
Patches are a way to kustomize resources using inline configurations in Argo CD applications. This allows for kustomizing without a kustomization file. `patches` follow the same logic as the corresponding Kustomization. Any patches that target an existing Kustomization file will be merged.
|
||||
|
||||
The following Kustomization can be done similarly in an Argo CD application.
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: kustomize-inline-example
|
||||
namespace: test1
|
||||
resources:
|
||||
- https://raw.githubusercontent.com/argoproj/argocd-example-apps/master/guestbook/guestbook-ui-deployment.yaml
|
||||
- https://raw.githubusercontent.com/argoproj/argocd-example-apps/master/guestbook/guestbook-ui-svc.yaml
|
||||
patches:
|
||||
- target:
|
||||
kind: Deployment
|
||||
name: guestbook-ui
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/ports/0/containerPort
|
||||
value: 443
|
||||
```
|
||||
The Application will clone the repository, use the specified path, and then kustomize using the inline patches configuration.
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: kustomize-inline-guestbook
|
||||
namespace: argocd
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
spec:
|
||||
destination:
|
||||
namespace: test1
|
||||
server: https://kubernetes.default.svc
|
||||
project: default
|
||||
source:
|
||||
path: guestbook
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
targetRevision: master
|
||||
kustomize:
|
||||
patches:
|
||||
- target:
|
||||
kind: Deployment
|
||||
name: guestbook-ui
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /spec/template/spec/containers/0/ports/0/containerPort
|
||||
value: 443
|
||||
```
|
||||
|
||||
## Private Remote Bases
|
||||
|
||||
If you have remote bases that are either (a) HTTPS and need username/password (b) SSH and need SSH private key, then they'll inherit that from the app's repo.
|
||||
|
||||
@@ -24,6 +24,22 @@ There are however several limitations:
|
||||
* Other external tools might write/append to this label and create conflicts with Argo CD. For example several Helm charts and operators also use this label for generated manifests confusing Argo CD about the owner of the application
|
||||
* You might want to deploy more than one Argo CD instance on the same cluster (with cluster wide privileges) and have an easy way to identify which resource is managed by which instance of Argo CD
|
||||
|
||||
### Use custom label
|
||||
|
||||
Instead of using the default `app.kubernetes.io/instance` label for resource tracking, Argo CD can be configured to use a custom label. Below example sets the resource tracking label to `argocd.argoproj.io/instance`.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-cm
|
||||
app.kubernetes.io/part-of: argocd
|
||||
data:
|
||||
application.instanceLabelKey: argocd.argoproj.io/instance
|
||||
```
|
||||
|
||||
## Additional tracking methods via an annotation
|
||||
|
||||
>v2.2
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-notifications-controller-cluster-apps
|
||||
app.kubernetes.io/part-of: argocd
|
||||
app.kubernetes.io/component: notifications-controller
|
||||
name: argocd-notifications-controller-cluster-apps
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "argoproj.io"
|
||||
resources:
|
||||
- "applications"
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
@@ -0,0 +1,16 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-notifications-controller-cluster-apps
|
||||
app.kubernetes.io/part-of: argocd
|
||||
app.kubernetes.io/component: notifications-controller
|
||||
name: argocd-notifications-controller-cluster-apps
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: argocd-notifications-controller-cluster-apps
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: argocd-notifications-controller
|
||||
namespace: argocd
|
||||
@@ -3,3 +3,5 @@ kind: Kustomization
|
||||
resources:
|
||||
- argocd-server-rbac-clusterrole.yaml
|
||||
- argocd-server-rbac-clusterrolebinding.yaml
|
||||
- argocd-notifications-controller-rbac-clusterrole.yaml
|
||||
- argocd-notifications-controller-rbac-clusterrolebinding.yaml
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#!/bin/sh
|
||||
|
||||
yq e -o=json values.yaml | jq '{
|
||||
yq e -o=json values.yaml | jq '[{
|
||||
name: "helm-parameters",
|
||||
title: "Helm Parameters",
|
||||
collectionType: "map",
|
||||
map: [leaf_paths as $path | {"key": $path | join("."), "value": getpath($path)|tostring}] | from_entries
|
||||
}'
|
||||
}]'
|
||||
|
||||
11
go.mod
11
go.mod
@@ -42,7 +42,6 @@ require (
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/handlers v1.5.1
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/gosimple/slug v1.13.1
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
|
||||
@@ -78,16 +77,17 @@ require (
|
||||
go.opentelemetry.io/otel v1.16.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
|
||||
go.opentelemetry.io/otel/sdk v1.16.0
|
||||
golang.org/x/crypto v0.13.0
|
||||
golang.org/x/crypto v0.14.0
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
|
||||
golang.org/x/oauth2 v0.11.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/term v0.12.0
|
||||
golang.org/x/term v0.13.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc
|
||||
google.golang.org/grpc v1.56.2
|
||||
google.golang.org/protobuf v1.31.0
|
||||
gopkg.in/square/go-jose.v2 v2.6.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apiextensions-apiserver v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
@@ -261,8 +261,8 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
|
||||
golang.org/x/mod v0.9.0 // indirect
|
||||
golang.org/x/net v0.15.0 // indirect
|
||||
golang.org/x/sys v0.12.0 // indirect
|
||||
golang.org/x/net v0.17.0
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.7.0 // indirect
|
||||
@@ -274,7 +274,6 @@ require (
|
||||
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/cli-runtime v0.24.2 // indirect
|
||||
k8s.io/component-base v0.24.2 // indirect
|
||||
k8s.io/component-helpers v0.24.2 // indirect
|
||||
|
||||
13
go.sum
13
go.sum
@@ -1277,7 +1277,6 @@ github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
@@ -2006,8 +2005,9 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -2159,8 +2159,9 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -2352,8 +2353,9 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -2366,8 +2368,9 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
55a8e6dce87a1e52c61e0ce7a89bf85b38725ba3e8deb51d4a08ade8a2c70b2d helm-v3.13.2-linux-amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
f5654aaed63a0da72852776e1d3f851b2ea9529cb5696337202703c2e1ed2321 helm-v3.13.2-linux-arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
11d96134cc4ec106c23cd8c163072e9aed6cd73e36a3da120e5876d426203f37 helm-v3.13.2-linux-ppc64le.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
3ffc5b4a041e5306dc00905ebe5dfea449e34ada268a713d34c69709afd6a9a2 helm-v3.13.2-linux-s390x.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
b7aba749da75d33e6fea49a5098747d379abc45583ff5cd16e2356127a396549 kustomize_5.2.1_darwin_amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
f6a5f3cffd45bac585a0c80b5ed855c2b72d932a1d6e8e7c87aae3be4eba5750 kustomize_5.2.1_darwin_arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
88346543206b889f9287c0b92c70708040ecd5aad54dd33019c4d6579cd24de8 kustomize_5.2.1_linux_amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
5566f7badece5a72d42075d8dffa6296a228966dd6ac2390de7afbb9675c3aaa kustomize_5.2.1_linux_arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
82d732cf624b6fa67dfabe751e9a1510e2d08605996b1b130b4c0f5b835b130e kustomize_5.2.1_linux_ppc64le.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
d94cb97a2776b4685ab41233dfd5f0b426f399d2fce87d2b69e1ce4907f3aad2 kustomize_5.2.1_linux_s390x.tar.gz
|
||||
@@ -11,8 +11,8 @@
|
||||
# Use ./hack/installers/checksums/add-helm-checksums.sh and
|
||||
# add-kustomize-checksums.sh to help download checksums.
|
||||
###############################################################################
|
||||
helm3_version=3.12.1
|
||||
helm3_version=3.13.2
|
||||
kubectl_version=1.17.8
|
||||
kubectx_version=0.6.3
|
||||
kustomize5_version=5.1.0
|
||||
kustomize5_version=5.2.1
|
||||
protoc_version=3.17.3
|
||||
|
||||
@@ -0,0 +1,218 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-application-controller
|
||||
app.kubernetes.io/part-of: argocd
|
||||
app.kubernetes.io/component: application-controller
|
||||
name: argocd-application-controller
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: argocd-application-controller
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: argocd-application-controller
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- /usr/local/bin/argocd-application-controller
|
||||
env:
|
||||
- name: ARGOCD_CONTROLLER_REPLICAS
|
||||
value: "1"
|
||||
- name: ARGOCD_RECONCILIATION_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cm
|
||||
key: timeout.reconciliation
|
||||
optional: true
|
||||
- name: ARGOCD_HARD_RECONCILIATION_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cm
|
||||
key: timeout.hard.reconciliation
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: repo.server
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.repo.server.timeout.seconds
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.status.processors
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.operation.processors
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.log.format
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_LOGLEVEL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.log.level
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.metrics.cache.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.self.heal.timeout.seconds
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.repo.server.plaintext
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.repo.server.strict.tls
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.resource.health.persist
|
||||
optional: true
|
||||
- name: ARGOCD_APP_STATE_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.app.state.cache.expiration
|
||||
optional: true
|
||||
- name: REDIS_SERVER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.server
|
||||
optional: true
|
||||
- name: REDIS_COMPRESSION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.compression
|
||||
optional: true
|
||||
- name: REDISDB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: redis.db
|
||||
optional: true
|
||||
- name: ARGOCD_DEFAULT_CACHE_EXPIRATION
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.default.cache.expiration
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: otlp.address
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: application.namespaces
|
||||
optional: true
|
||||
- name: ARGOCD_CONTROLLER_SHARDING_ALGORITHM
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.sharding.algorithm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.kubectl.parallelism.limit
|
||||
optional: true
|
||||
- name: ARGOCD_CONTROLLER_HEARTBEAT_TIME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: controller.heatbeat.time
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
- containerPort: 8082
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8082
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
workingDir: /home/argocd
|
||||
volumeMounts:
|
||||
- name: argocd-repo-server-tls
|
||||
mountPath: /app/config/controller/tls
|
||||
- name: argocd-home
|
||||
mountPath: /home/argocd
|
||||
serviceAccountName: argocd-application-controller
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: argocd-application-controller
|
||||
topologyKey: kubernetes.io/hostname
|
||||
- weight: 5
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/part-of: argocd
|
||||
topologyKey: kubernetes.io/hostname
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: argocd-home
|
||||
- name: argocd-repo-server-tls
|
||||
secret:
|
||||
secretName: argocd-repo-server-tls
|
||||
optional: true
|
||||
items:
|
||||
- key: tls.crt
|
||||
path: tls.crt
|
||||
- key: tls.key
|
||||
path: tls.key
|
||||
- key: ca.crt
|
||||
path: ca.crt
|
||||
@@ -0,0 +1,20 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: argocd-application-controller
|
||||
app.kubernetes.io/name: argocd-application-controller
|
||||
app.kubernetes.io/part-of: argocd
|
||||
name: argocd-application-controller
|
||||
spec:
|
||||
ports:
|
||||
- name: application-controller
|
||||
protocol: TCP
|
||||
port: 8082
|
||||
targetPort: 8082
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 8084
|
||||
targetPort: 8084
|
||||
selector:
|
||||
app.kubernetes.io/name: argocd-application-controller
|
||||
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- argocd-application-controller-service.yaml
|
||||
- argocd-application-controller-deployment.yaml
|
||||
@@ -210,4 +210,4 @@ spec:
|
||||
- key: tls.key
|
||||
path: tls.key
|
||||
- key: ca.crt
|
||||
path: ca.crt
|
||||
path: ca.crt
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.9.2
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
@@ -48,6 +48,12 @@ spec:
|
||||
key: notificationscontroller.log.level
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATION_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: application.namespaces
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
workingDir: /app
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
|
||||
@@ -32,6 +32,7 @@ rules:
|
||||
- "argoproj.io"
|
||||
resources:
|
||||
- "applications"
|
||||
- "applicationsets"
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.9.2
|
||||
|
||||
@@ -348,6 +348,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -646,6 +677,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -1058,6 +1120,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize adds
|
||||
to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas override
|
||||
specifications
|
||||
@@ -1346,6 +1439,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas override
|
||||
specifications
|
||||
@@ -1787,6 +1911,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -2088,6 +2243,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -2533,6 +2719,37 @@ spec:
|
||||
description: Namespace sets the namespace that
|
||||
Kustomize adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -2851,6 +3068,38 @@ spec:
|
||||
description: Namespace sets the namespace that
|
||||
Kustomize adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize
|
||||
patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize
|
||||
Replicas override specifications
|
||||
@@ -3283,6 +3532,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -3594,6 +3874,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -4049,6 +4360,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
@@ -4360,6 +4702,37 @@ spec:
|
||||
description: Namespace sets the namespace that Kustomize
|
||||
adds to all resources
|
||||
type: string
|
||||
patches:
|
||||
description: Patches is a list of Kustomize patches
|
||||
items:
|
||||
properties:
|
||||
options:
|
||||
additionalProperties:
|
||||
type: boolean
|
||||
type: object
|
||||
patch:
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
target:
|
||||
properties:
|
||||
annotationSelector:
|
||||
type: string
|
||||
group:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
labelSelector:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
version:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
replicas:
|
||||
description: Replicas is a list of Kustomize Replicas
|
||||
override specifications
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,15 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: argocd-application-controller
|
||||
spec:
|
||||
replicas: 0
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: argocd-application-controller
|
||||
args:
|
||||
- /usr/local/bin/argocd-application-controller
|
||||
env:
|
||||
- name: ARGOCD_CONTROLLER_REPLICAS
|
||||
value: "0"
|
||||
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cmd-params-cm
|
||||
data:
|
||||
redis.server: argocd-redis-ha-haproxy:6379
|
||||
@@ -0,0 +1,26 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argocd-repo-server
|
||||
spec:
|
||||
replicas: 2
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: argocd-repo-server
|
||||
topologyKey: kubernetes.io/hostname
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: argocd-repo-server
|
||||
topologyKey: topology.kubernetes.io/zone
|
||||
containers:
|
||||
- name: argocd-repo-server
|
||||
args:
|
||||
- /usr/local/bin/argocd-repo-server
|
||||
@@ -0,0 +1,29 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: argocd-server
|
||||
spec:
|
||||
replicas: 2
|
||||
template:
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: argocd-server
|
||||
topologyKey: kubernetes.io/hostname
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: argocd-server
|
||||
topologyKey: topology.kubernetes.io/zone
|
||||
containers:
|
||||
- name: argocd-server
|
||||
env:
|
||||
- name: ARGOCD_API_SERVER_REPLICAS
|
||||
value: '2'
|
||||
args:
|
||||
- /usr/local/bin/argocd-server
|
||||
25
manifests/ha/base/controller-deployment/kustomization.yaml
Normal file
25
manifests/ha/base/controller-deployment/kustomization.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
|
||||
patches:
|
||||
- path: argocd-repo-server-deployment.yaml
|
||||
- path: argocd-server-deployment.yaml
|
||||
- path: argocd-application-controller-statefulset.yaml
|
||||
- path: argocd-cmd-params-cm.yaml
|
||||
|
||||
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
resources:
|
||||
- ../../../base/application-controller
|
||||
- ../../../base/application-controller-deployment
|
||||
- ../../../base/applicationset-controller
|
||||
- ../../../base/dex
|
||||
- ../../../base/repo-server
|
||||
- ../../../base/server
|
||||
- ../../../base/config
|
||||
- ../../../base/notification
|
||||
- ../redis-ha
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v2.9.2
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
@@ -8,4 +8,4 @@ spec:
|
||||
containers:
|
||||
- name: argocd-application-controller
|
||||
args:
|
||||
- /usr/local/bin/argocd-application-controller
|
||||
- /usr/local/bin/argocd-application-controller
|
||||
@@ -1094,7 +1094,13 @@ spec:
|
||||
args:
|
||||
- /readonly/haproxy_init.sh
|
||||
securityContext:
|
||||
null
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /readonly
|
||||
@@ -1106,7 +1112,13 @@ spec:
|
||||
image: haproxy:2.6.14-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
null
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
@@ -1204,7 +1216,14 @@ spec:
|
||||
args:
|
||||
- /readonly-config/init.sh
|
||||
securityContext:
|
||||
null
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
env:
|
||||
- name: SENTINEL_ID_0
|
||||
value: 3c0d9c0320bb34888c2df5757c718ce6ca992ce6
|
||||
@@ -1229,7 +1248,14 @@ spec:
|
||||
args:
|
||||
- /data/conf/redis.conf
|
||||
securityContext:
|
||||
null
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 15
|
||||
@@ -1279,7 +1305,14 @@ spec:
|
||||
args:
|
||||
- /data/conf/sentinel.conf
|
||||
securityContext:
|
||||
null
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 15
|
||||
@@ -1323,7 +1356,14 @@ spec:
|
||||
args:
|
||||
- /readonly-config/fix-split-brain.sh
|
||||
securityContext:
|
||||
null
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
env:
|
||||
- name: SENTINEL_ID_0
|
||||
value: 3c0d9c0320bb34888c2df5757c718ce6ca992ce6
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1654,7 +1654,7 @@ spec:
|
||||
key: applicationsetcontroller.allowed.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1777,7 +1777,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1847,7 +1847,13 @@ spec:
|
||||
key: notificationscontroller.log.level
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
- name: ARGOCD_APPLICATION_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: application.namespaces
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2178,7 +2184,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2230,7 +2236,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2519,7 +2525,7 @@ spec:
|
||||
key: server.enable.proxy.extension
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2765,7 +2771,7 @@ spec:
|
||||
key: controller.kubectl.parallelism.limit
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -749,7 +749,7 @@ spec:
|
||||
key: applicationsetcontroller.allowed.scm.providers
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -872,7 +872,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -942,7 +942,13 @@ spec:
|
||||
key: notificationscontroller.log.level
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
- name: ARGOCD_APPLICATION_NAMESPACES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: application.namespaces
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1224,7 +1230,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1276,7 +1282,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -1563,7 +1569,7 @@ spec:
|
||||
key: server.enable.proxy.extension
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -1809,7 +1815,7 @@ spec:
|
||||
key: controller.kubectl.parallelism.limit
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v2.9.2
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -123,6 +123,7 @@ nav:
|
||||
- operator-manual/server-commands/additional-configuration-method.md
|
||||
- Upgrading:
|
||||
- operator-manual/upgrading/overview.md
|
||||
- operator-manual/upgrading/2.8-2.9.md
|
||||
- operator-manual/upgrading/2.7-2.8.md
|
||||
- operator-manual/upgrading/2.6-2.7.md
|
||||
- operator-manual/upgrading/2.5-2.6.md
|
||||
|
||||
@@ -6,10 +6,14 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/glob"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/notification/k8s"
|
||||
|
||||
service "github.com/argoproj/argo-cd/v2/util/notification/argocd"
|
||||
|
||||
argocert "github.com/argoproj/argo-cd/v2/util/cert"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/notification/settings"
|
||||
@@ -19,6 +23,7 @@ import (
|
||||
"github.com/argoproj/notifications-engine/pkg/controller"
|
||||
"github.com/argoproj/notifications-engine/pkg/services"
|
||||
"github.com/argoproj/notifications-engine/pkg/subscriptions"
|
||||
httputil "github.com/argoproj/notifications-engine/pkg/util/http"
|
||||
log "github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
@@ -53,14 +58,21 @@ func NewController(
|
||||
client dynamic.Interface,
|
||||
argocdService service.Service,
|
||||
namespace string,
|
||||
applicationNamespaces []string,
|
||||
appLabelSelector string,
|
||||
registry *controller.MetricsRegistry,
|
||||
secretName string,
|
||||
configMapName string,
|
||||
) *notificationController {
|
||||
appClient := client.Resource(applications)
|
||||
appInformer := newInformer(appClient.Namespace(namespace), appLabelSelector)
|
||||
appProjInformer := newInformer(newAppProjClient(client, namespace), "")
|
||||
var appClient dynamic.ResourceInterface
|
||||
namespaceableAppClient := client.Resource(applications)
|
||||
appClient = namespaceableAppClient
|
||||
if len(applicationNamespaces) == 0 {
|
||||
appClient = namespaceableAppClient.Namespace(namespace)
|
||||
}
|
||||
|
||||
appInformer := newInformer(appClient, namespace, applicationNamespaces, appLabelSelector)
|
||||
appProjInformer := newInformer(newAppProjClient(client, namespace), namespace, []string{namespace}, "")
|
||||
secretInformer := k8s.NewSecretInformer(k8sClient, namespace, secretName)
|
||||
configMapInformer := k8s.NewConfigMapInformer(k8sClient, namespace, configMapName)
|
||||
apiFactory := api.NewFactory(settings.GetFactorySettings(argocdService, secretName, configMapName), namespace, secretInformer, configMapInformer)
|
||||
@@ -71,12 +83,15 @@ func NewController(
|
||||
appInformer: appInformer,
|
||||
appProjInformer: appProjInformer,
|
||||
apiFactory: apiFactory}
|
||||
res.ctrl = controller.NewController(appClient, appInformer, apiFactory,
|
||||
res.ctrl = controller.NewController(namespaceableAppClient, appInformer, apiFactory,
|
||||
controller.WithSkipProcessing(func(obj v1.Object) (bool, string) {
|
||||
app, ok := (obj).(*unstructured.Unstructured)
|
||||
if !ok {
|
||||
return false, ""
|
||||
}
|
||||
if checkAppNotInAdditionalNamespaces(app, namespace, applicationNamespaces) {
|
||||
return true, "app is not in one of the application-namespaces, nor the notification controller namespace"
|
||||
}
|
||||
return !isAppSyncStatusRefreshed(app, log.WithField("app", obj.GetName())), "sync status out of date"
|
||||
}),
|
||||
controller.WithMetricsRegistry(registry),
|
||||
@@ -84,6 +99,11 @@ func NewController(
|
||||
return res
|
||||
}
|
||||
|
||||
// Check if app is not in the namespace where the controller is in, and also app is not in one of the applicationNamespaces
|
||||
func checkAppNotInAdditionalNamespaces(app *unstructured.Unstructured, namespace string, applicationNamespaces []string) bool {
|
||||
return namespace != app.GetNamespace() && !glob.MatchStringInList(applicationNamespaces, app.GetNamespace(), false)
|
||||
}
|
||||
|
||||
func (c *notificationController) alterDestinations(obj v1.Object, destinations services.Destinations, cfg api.Config) services.Destinations {
|
||||
app, ok := (obj).(*unstructured.Unstructured)
|
||||
if !ok {
|
||||
@@ -97,21 +117,38 @@ func (c *notificationController) alterDestinations(obj v1.Object, destinations s
|
||||
return destinations
|
||||
}
|
||||
|
||||
func newInformer(resClient dynamic.ResourceInterface, selector string) cache.SharedIndexInformer {
|
||||
func newInformer(resClient dynamic.ResourceInterface, controllerNamespace string, applicationNamespaces []string, selector string) cache.SharedIndexInformer {
|
||||
informer := cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (object runtime.Object, err error) {
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
// We are only interested in apps that exist in namespaces the
|
||||
// user wants to be enabled.
|
||||
options.LabelSelector = selector
|
||||
return resClient.List(context.Background(), options)
|
||||
appList, err := resClient.List(context.TODO(), options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list applications: %w", err)
|
||||
}
|
||||
newItems := []unstructured.Unstructured{}
|
||||
for _, res := range appList.Items {
|
||||
if controllerNamespace == res.GetNamespace() || glob.MatchStringInList(applicationNamespaces, res.GetNamespace(), false) {
|
||||
newItems = append(newItems, res)
|
||||
}
|
||||
}
|
||||
appList.Items = newItems
|
||||
return appList, nil
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = selector
|
||||
return resClient.Watch(context.Background(), options)
|
||||
return resClient.Watch(context.TODO(), options)
|
||||
},
|
||||
},
|
||||
&unstructured.Unstructured{},
|
||||
resyncPeriod,
|
||||
cache.Indexers{},
|
||||
cache.Indexers{
|
||||
cache.NamespaceIndex: func(obj interface{}) ([]string, error) {
|
||||
return cache.MetaNamespaceIndexFunc(obj)
|
||||
},
|
||||
},
|
||||
)
|
||||
return informer
|
||||
}
|
||||
@@ -126,6 +163,9 @@ type notificationController struct {
|
||||
}
|
||||
|
||||
func (c *notificationController) Init(ctx context.Context) error {
|
||||
// resolve certificates using injected "argocd-tls-certs-cm" ConfigMap
|
||||
httputil.SetCertResolver(argocert.GetCertificateForConnect)
|
||||
|
||||
go c.appInformer.Run(ctx.Done())
|
||||
go c.appProjInformer.Run(ctx.Done())
|
||||
go c.secretInformer.Run(ctx.Done())
|
||||
|
||||
@@ -115,6 +115,7 @@ func TestInit(t *testing.T) {
|
||||
dynamicClient,
|
||||
nil,
|
||||
"default",
|
||||
[]string{},
|
||||
appLabelSelector,
|
||||
nil,
|
||||
"my-secret",
|
||||
@@ -146,6 +147,7 @@ func TestInitTimeout(t *testing.T) {
|
||||
dynamicClient,
|
||||
nil,
|
||||
"default",
|
||||
[]string{},
|
||||
appLabelSelector,
|
||||
nil,
|
||||
"my-secret",
|
||||
@@ -164,3 +166,27 @@ func TestInitTimeout(t *testing.T) {
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "Timed out waiting for caches to sync", err.Error())
|
||||
}
|
||||
|
||||
func TestCheckAppNotInAdditionalNamespaces(t *testing.T) {
|
||||
app := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"spec": map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
namespace := "argocd"
|
||||
var applicationNamespaces []string
|
||||
applicationNamespaces = append(applicationNamespaces, "namespace1")
|
||||
applicationNamespaces = append(applicationNamespaces, "namespace2")
|
||||
|
||||
// app is in same namespace as controller's namespace
|
||||
app.SetNamespace(namespace)
|
||||
assert.False(t, checkAppNotInAdditionalNamespaces(app, namespace, applicationNamespaces))
|
||||
|
||||
// app is not in the namespace as controller's namespace, but it is in one of the applicationNamespaces
|
||||
app.SetNamespace("namespace2")
|
||||
assert.False(t, checkAppNotInAdditionalNamespaces(app, "", applicationNamespaces))
|
||||
|
||||
// app is not in the namespace as controller's namespace, and it is not in any of the applicationNamespaces
|
||||
app.SetNamespace("namespace3")
|
||||
assert.True(t, checkAppNotInAdditionalNamespaces(app, "", applicationNamespaces))
|
||||
}
|
||||
|
||||
@@ -10,6 +10,8 @@ API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/ap
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationMatchExpression,Values
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationPreservedFields,Annotations
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationPreservedFields,Labels
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationSetResourceIgnoreDifferences,JQPathExpressions
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationSetResourceIgnoreDifferences,JSONPointers
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationSetRolloutStep,MatchExpressions
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationSetRolloutStrategy,Steps
|
||||
API rule violation: list_type_missing,github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1,ApplicationSetSpec,Generators
|
||||
|
||||
@@ -63,7 +63,8 @@ type ApplicationSetSpec struct {
|
||||
PreservedFields *ApplicationPreservedFields `json:"preservedFields,omitempty" protobuf:"bytes,6,opt,name=preservedFields"`
|
||||
GoTemplateOptions []string `json:"goTemplateOptions,omitempty" protobuf:"bytes,7,opt,name=goTemplateOptions"`
|
||||
// ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators
|
||||
ApplyNestedSelectors bool `json:"applyNestedSelectors,omitempty" protobuf:"bytes,8,name=applyNestedSelectors"`
|
||||
ApplyNestedSelectors bool `json:"applyNestedSelectors,omitempty" protobuf:"bytes,8,name=applyNestedSelectors"`
|
||||
IgnoreApplicationDifferences ApplicationSetIgnoreDifferences `json:"ignoreApplicationDifferences,omitempty" protobuf:"bytes,9,name=ignoreApplicationDifferences"`
|
||||
}
|
||||
|
||||
type ApplicationPreservedFields struct {
|
||||
@@ -127,6 +128,39 @@ type ApplicationSetSyncPolicy struct {
|
||||
ApplicationsSync *ApplicationsSyncPolicy `json:"applicationsSync,omitempty" protobuf:"bytes,2,opt,name=applicationsSync,casttype=ApplicationsSyncPolicy"`
|
||||
}
|
||||
|
||||
// ApplicationSetIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live
|
||||
// applications when applying changes from generated applications.
|
||||
type ApplicationSetIgnoreDifferences []ApplicationSetResourceIgnoreDifferences
|
||||
|
||||
func (a ApplicationSetIgnoreDifferences) ToApplicationIgnoreDifferences() []ResourceIgnoreDifferences {
|
||||
var result []ResourceIgnoreDifferences
|
||||
for _, item := range a {
|
||||
result = append(result, item.ToApplicationResourceIgnoreDifferences())
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live
|
||||
// applications when applying changes from generated applications.
|
||||
type ApplicationSetResourceIgnoreDifferences struct {
|
||||
// Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications.
|
||||
Name string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
|
||||
// JSONPointers is a list of JSON pointers to fields to ignore differences for.
|
||||
JSONPointers []string `json:"jsonPointers,omitempty" protobuf:"bytes,2,name=jsonPointers"`
|
||||
// JQPathExpressions is a list of JQ path expressions to fields to ignore differences for.
|
||||
JQPathExpressions []string `json:"jqPathExpressions,omitempty" protobuf:"bytes,3,name=jqExpressions"`
|
||||
}
|
||||
|
||||
func (a *ApplicationSetResourceIgnoreDifferences) ToApplicationResourceIgnoreDifferences() ResourceIgnoreDifferences {
|
||||
return ResourceIgnoreDifferences{
|
||||
Kind: ApplicationSchemaGroupVersionKind.Kind,
|
||||
Group: ApplicationSchemaGroupVersionKind.Group,
|
||||
Name: a.Name,
|
||||
JSONPointers: a.JSONPointers,
|
||||
JQPathExpressions: a.JQPathExpressions,
|
||||
}
|
||||
}
|
||||
|
||||
// ApplicationSetTemplate represents argocd ApplicationSpec
|
||||
type ApplicationSetTemplate struct {
|
||||
ApplicationSetTemplateMeta `json:"metadata" protobuf:"bytes,1,name=metadata"`
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user