Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 09:38:49 +01:00)

Compare commits: release-3. ... v2.9.16 (117 commits)
| SHA1 |
|---|
| eb77b7028c |
| 2e7fbb0428 |
| 423ad6ba43 |
| a28a57dadf |
| 59db3998e2 |
| 9552034a80 |
| 53570cbd14 |
| 26a868f76d |
| 703e32fafe |
| f8c2e48a03 |
| d774c422de |
| 2cb08f5c26 |
| d3955f4f2a |
| 312cea0504 |
| 98203002ba |
| a316a413ae |
| c8f21e383a |
| e2df7315fb |
| 9e4a0f523e |
| 7c21486b31 |
| caab433303 |
| d6cb4e903f |
| 710ee9a218 |
| e0ff56d89f |
| 91bb155ed2 |
| 7ff0ccc34e |
| b17323b41e |
| 1701f7edd3 |
| ed5d9c3874 |
| 14f681e3ee |
| 8281b18831 |
| 5bbb51ab42 |
| 6e181d72b3 |
| c415d3f3d5 |
| 759bd2888d |
| 2a410f3441 |
| 689a73a729 |
| fbb6b20418 |
| f05a6b7906 |
| 7aac4ba0f0 |
| 7cac5a8946 |
| ba62a0a86d |
| 3fa66ec42c |
| cc672e7609 |
| cce1e97434 |
| f43122d3cd |
| f9436641a6 |
| 7561cc1cdd |
| bb06722d98 |
| 997f89a92c |
| f569aa105e |
| dac68ac593 |
| 45d4a071ae |
| a10cb870f8 |
| 8436954759 |
| ab3b2e780e |
| 0e65b848da |
| 9075601a68 |
| 31860d09d4 |
| 65dec01de0 |
| 6eba5be864 |
| cc56c9e8a2 |
| 30f68e1041 |
| e63273e4c1 |
| d5eaaa3527 |
| c5ea5c4df5 |
| c2c9746050 |
| 78cd50b2c7 |
| dd86b08369 |
| 0ca43663c9 |
| d4c37e2521 |
| ba60fadd94 |
| 58b04e5e11 |
| 3acd5ee30d |
| 45d5de702e |
| b8efc8b1ab |
| 9cf0c69bbe |
| 871b045368 |
| 7f9be43b8b |
| 78ad599120 |
| 557871d223 |
| f29e4fbea3 |
| 766316ef74 |
| eb0afcbc3d |
| 0083647b8b |
| 5289315c3f |
| 83ec3bfbf7 |
| 5c86f758c3 |
| 67e1e04afb |
| 266e92e3a1 |
| 6648d31671 |
| 82185106a2 |
| b14837e58e |
| 9c4a90af91 |
| 3750adefa7 |
| dc3d08e626 |
| eaa9af21d7 |
| 55e5d6bf3e |
| 85422bbb17 |
| 79901a4e84 |
| 5506e8520c |
| 0a97e150d8 |
| e1ac2f6071 |
| 8c3f38a97d |
| 8b9c448786 |
| 80baeb8a6c |
| 72f7b14594 |
| b9bf46dfb9 |
| d7c489b9cc |
| 38eb17a027 |
| ea3402962f |
| b3fabc23cd |
| 69d6d1064b |
| d09621d36b |
| 728205618e |
| 912a2db05c |
| d105196075 |
.github/workflows/ci-build.yaml (4 changed lines, vendored)

@@ -361,7 +361,7 @@ jobs:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
-       k3s-version: [v1.27.2, v1.26.0, v1.25.4, v1.24.3]
+       k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
    needs:
      - build-go
    env:
@@ -428,7 +428,7 @@ jobs:
        run: |
          docker pull ghcr.io/dexidp/dex:v2.37.0
          docker pull argoproj/argo-cd-ci-builder:v1.0.0
-         docker pull redis:7.0.11-alpine
+         docker pull redis:7.0.15-alpine
      - name: Create target directory for binaries in the build-process
        run: |
          mkdir -p dist
.github/workflows/image-reuse.yaml (12 changed lines, vendored)

@@ -74,9 +74,7 @@ jobs:
          go-version: ${{ inputs.go-version }}

      - name: Install cosign
-       uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
-       with:
-         cosign-release: 'v2.0.0'
+       uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0

      - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
      - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
@@ -135,6 +133,14 @@ jobs:
          echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
          echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

+     - name: Free Disk Space (Ubuntu)
+       uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
+       with:
+         large-packages: false
+         docker-images: false
+         swap-storage: false
+         tool-cache: false
+
      - name: Build and push container image
        id: image
        uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1
.github/workflows/image.yaml (2 changed lines, vendored)

@@ -86,7 +86,7 @@ jobs:
      packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
    if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
+   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.10.0
    with:
      image: ghcr.io/argoproj/argo-cd/argocd
      digest: ${{ needs.build-and-publish.outputs.image-digest }}
.github/workflows/release.yaml (6 changed lines, vendored)

@@ -38,7 +38,7 @@ jobs:
      packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
    if: github.repository == 'argoproj/argo-cd'
-   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
+   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.10.0
    with:
      image: quay.io/argoproj/argocd
      digest: ${{ needs.argocd-image.outputs.image-digest }}
@@ -120,7 +120,7 @@ jobs:
      contents: write # Needed for release uploads
    if: github.repository == 'argoproj/argo-cd'
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
+   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
    with:
      base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
      provenance-name: "argocd-cli.intoto.jsonl"
@@ -204,7 +204,7 @@ jobs:
      contents: write # Needed for release uploads
    if: github.repository == 'argoproj/argo-cd'
    # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
+   uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
    with:
      base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
      provenance-name: "argocd-sbom.intoto.jsonl"
@@ -114,7 +114,7 @@ changelog:
  exclude:
    - '^test:'
    - '^.*?Bump(\([[:word:]]+\))?.+$'
-   - '^.*?[Bot](\([[:word:]]+\))?.+$'
+   - '^.*?\[Bot\](\([[:word:]]+\))?.+$'

# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
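The changelog exclude patterns above are regular expressions, and the fix matters because unescaped square brackets form a character class: `[Bot]` matches any single `B`, `o`, or `t`, while `\[Bot\]` matches the literal tag `[Bot]`. A small standalone check with plain Go `regexp` (the sample titles are made up for illustration):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Unescaped brackets are a character class: any one of 'B', 'o', 't'.
	class := regexp.MustCompile(`^.*?[Bot].+$`)
	// Escaped brackets match the literal string "[Bot]".
	literal := regexp.MustCompile(`^.*?\[Bot\].+$`)

	titles := []string{
		"chore(deps) [Bot]: bump some module",
		"fix: reboot handling in controller", // no [Bot] tag, but contains 'o' and 't'
	}
	for _, title := range titles {
		fmt.Printf("%-40q class=%v literal=%v\n", title, class.MatchString(title), literal.MatchString(title))
	}
}
```

With the old pattern the second, legitimate commit title would also have been excluded from the changelog; the escaped form only drops bot-tagged entries.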
@@ -4,9 +4,9 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fca
 # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
 # Also used as the image in CI jobs so needs all dependencies
 ####################################################################################################
-FROM docker.io/library/golang:1.21.1@sha256:2270a408c4cb38f8459839082d89afa4a2870773c509adf7641e9558167d0030 AS builder
+FROM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS builder

-RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
+RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list

 RUN apt-get update && apt-get install --no-install-recommends -y \
     openssh-server \
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
 ####################################################################################################
 # Argo CD Build stage which performs the actual build of Argo CD binaries
 ####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.1@sha256:2270a408c4cb38f8459839082d89afa4a2870773c509adf7641e9558167d0030 AS argocd-build
+FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS argocd-build

 WORKDIR /go/src/github.com/argoproj/argo-cd
@@ -16,7 +16,6 @@ package controllers

import (
	"context"
-	"encoding/json"
	"fmt"
	"reflect"
	"time"
@@ -25,7 +24,6 @@ import (
	corev1 "k8s.io/api/core/v1"
	apierr "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
@@ -46,13 +44,13 @@ import (
	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	"github.com/argoproj/argo-cd/v2/applicationset/utils"
	"github.com/argoproj/argo-cd/v2/common"
-	argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
	"github.com/argoproj/argo-cd/v2/util/db"
	"github.com/argoproj/argo-cd/v2/util/glob"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
	argoutil "github.com/argoproj/argo-cd/v2/util/argo"
+	"github.com/argoproj/argo-cd/v2/util/argo/normalizers"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application"
)
@@ -163,13 +161,15 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque

	if r.EnableProgressiveSyncs {
		if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
			// If appset used progressive sync but stopped, clean up the progressive sync application statuses
			log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)

			err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
			}
-		} else {
+		} else if applicationSetInfo.Spec.Strategy != nil {
			// appset uses progressive sync
			applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
@@ -439,8 +439,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
			errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
			continue
		}

-		proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
+		_, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
		if err != nil {
			if apierr.IsNotFound(err) {
				errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
@@ -454,15 +453,6 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
			continue
		}

-		conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
-		if err != nil {
-			return nil, fmt.Errorf("error validating permissions: %s", err)
-		}
-		if len(conditions) > 0 {
-			errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
-			continue
-		}
-
	}

	return errorsByIndex, nil
@@ -634,7 +624,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
		},
	}

-	action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
+	action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, normalizers.IgnoreNormalizerOpts{}, found, func() error {
		// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
		found.Spec = generatedApp.Spec

@@ -687,13 +677,6 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
		found.ObjectMeta.Finalizers = generatedApp.Finalizers
		found.ObjectMeta.Labels = generatedApp.Labels

-		if found != nil && len(found.Spec.IgnoreDifferences) > 0 {
-			err := applyIgnoreDifferences(applicationSet.Spec.IgnoreApplicationDifferences, found, generatedApp)
-			if err != nil {
-				return fmt.Errorf("failed to apply ignore differences: %w", err)
-			}
-		}
-
		return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
	})

@@ -705,60 +688,20 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
			continue
		}
		r.updateCache(ctx, found, appLog)
-		r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
-		appLog.Logf(log.InfoLevel, "%s Application", action)

+		if action != controllerutil.OperationResultNone {
+			// Don't pollute etcd with "unchanged Application" events
+			r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
+			appLog.Logf(log.InfoLevel, "%s Application", action)
+		} else {
+			// "unchanged Application" can be inferred by Reconcile Complete with no action being listed
+			// Or enable debug logging
+			appLog.Logf(log.DebugLevel, "%s Application", action)
+		}
	}
	return firstError
}

-// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the found application in place.
-func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp argov1alpha1.Application) error {
-	diffConfig, err := argodiff.NewDiffConfigBuilder().
-		WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
-		WithNoCache().
-		Build()
-	if err != nil {
-		return fmt.Errorf("failed to build diff config: %w", err)
-	}
-	unstructuredFound, err := appToUnstructured(found)
-	if err != nil {
-		return fmt.Errorf("failed to convert found application to unstructured: %w", err)
-	}
-	unstructuredGenerated, err := appToUnstructured(&generatedApp)
-	if err != nil {
-		return fmt.Errorf("failed to convert found application to unstructured: %w", err)
-	}
-	result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
-	if err != nil {
-		return fmt.Errorf("failed to normalize application spec: %w", err)
-	}
-	if len(result.Targets) != 1 {
-		return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
-	}
-	jsonNormalized, err := json.Marshal(result.Targets[0].Object)
-	if err != nil {
-		return fmt.Errorf("failed to marshal normalized app to json: %w", err)
-	}
-	err = json.Unmarshal(jsonNormalized, &found)
-	if err != nil {
-		return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
-	}
-	// Prohibit jq queries from mutating silly things.
-	found.TypeMeta = generatedApp.TypeMeta
-	found.Name = generatedApp.Name
-	found.Namespace = generatedApp.Namespace
-	found.Operation = generatedApp.Operation
-	return nil
-}
-
-func appToUnstructured(app *argov1alpha1.Application) (*unstructured.Unstructured, error) {
-	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
-	if err != nil {
-		return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
-	}
-	return &unstructured.Unstructured{Object: u}, nil
-}

// createInCluster will filter from the desiredApplications only the application that needs to be created
// Then it will call createOrUpdateInCluster to do the actual create
func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
@@ -912,7 +855,11 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
	if len(newFinalizers) != len(app.Finalizers) {
		updated := app.DeepCopy()
		updated.Finalizers = newFinalizers
-		if err := r.Client.Patch(ctx, updated, client.MergeFrom(app)); err != nil {
+		patch := client.MergeFrom(app)
+		if log.IsLevelEnabled(log.DebugLevel) {
+			utils.LogPatch(appLog, patch, updated)
+		}
+		if err := r.Client.Patch(ctx, updated, patch); err != nil {
			return fmt.Errorf("error updating finalizers: %w", err)
		}
		r.updateCache(ctx, updated, appLog)
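The LogPatch helper referenced above (defined in the utils changes further down) logs the body of the merge patch that controller-runtime computes between the pre-mutation snapshot and the mutated Application. A minimal, self-contained illustration of what `client.MergeFrom(...).Data(...)` produces, using a ConfigMap instead of an Application purely to keep the sketch small:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	before := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "demo",
			Namespace:  "default",
			Finalizers: []string{"keep-me", "drop-me"},
		},
	}
	// Snapshot the live object, then mutate a copy (here: drop one finalizer),
	// which mirrors the finalizer-removal code path shown above.
	updated := before.DeepCopy()
	updated.Finalizers = []string{"keep-me"}

	patch := client.MergeFrom(before)
	data, err := patch.Data(updated) // JSON merge patch containing only the changed fields
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // e.g. {"metadata":{"finalizers":["keep-me"]}}
}
```

Guarding the LogPatch call behind `log.IsLevelEnabled(log.DebugLevel)` means the extra serialization for logging only happens when debug logging is actually turned on.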
@@ -12,8 +12,6 @@ import (
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
	corev1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -981,6 +979,296 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
				},
			},
		},
	}, {
		// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
		name: "Ensure that ignored targetRevision difference doesn't cause an update, even if another field changes",
		appSet: v1alpha1.ApplicationSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "name",
				Namespace: "namespace",
			},
			Spec: v1alpha1.ApplicationSetSpec{
				IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
					{JQPathExpressions: []string{".spec.source.targetRevision"}},
				},
				Template: v1alpha1.ApplicationSetTemplate{
					Spec: v1alpha1.ApplicationSpec{
						Project: "project",
						Source: &v1alpha1.ApplicationSource{
							RepoURL:        "https://git.example.com/test-org/test-repo.git",
							TargetRevision: "foo",
						},
					},
				},
			},
		},
		existingApps: []v1alpha1.Application{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Application",
					APIVersion: "argoproj.io/v1alpha1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:            "app1",
					Namespace:       "namespace",
					ResourceVersion: "2",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Source: &v1alpha1.ApplicationSource{
						RepoURL:        "https://git.example.com/test-org/test-repo.git",
						TargetRevision: "bar",
					},
				},
			},
		},
		desiredApps: []v1alpha1.Application{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "app1",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Source: &v1alpha1.ApplicationSource{
						RepoURL: "https://git.example.com/test-org/test-repo.git",
						// The targetRevision is ignored, so this should not be updated.
						TargetRevision: "foo",
						// This should be updated.
						Helm: &v1alpha1.ApplicationSourceHelm{
							Parameters: []v1alpha1.HelmParameter{
								{Name: "hi", Value: "there"},
							},
						},
					},
				},
			},
		},
		expected: []v1alpha1.Application{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Application",
					APIVersion: "argoproj.io/v1alpha1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:            "app1",
					Namespace:       "namespace",
					ResourceVersion: "3",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Source: &v1alpha1.ApplicationSource{
						RepoURL: "https://git.example.com/test-org/test-repo.git",
						// This is the existing value from the cluster, which should not be updated because the field is ignored.
						TargetRevision: "bar",
						// This was missing on the cluster, so it should be added.
						Helm: &v1alpha1.ApplicationSourceHelm{
							Parameters: []v1alpha1.HelmParameter{
								{Name: "hi", Value: "there"},
							},
						},
					},
				},
			},
		},
	}, {
		// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
		name: "ignore parameters added to a multi-source app in the cluster",
		appSet: v1alpha1.ApplicationSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "name",
				Namespace: "namespace",
			},
			Spec: v1alpha1.ApplicationSetSpec{
				IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
					{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
				},
				Template: v1alpha1.ApplicationSetTemplate{
					Spec: v1alpha1.ApplicationSpec{
						Project: "project",
						Sources: []v1alpha1.ApplicationSource{
							{
								RepoURL: "https://git.example.com/test-org/test-repo.git",
								Helm: &v1alpha1.ApplicationSourceHelm{
									Values: "foo: bar",
								},
							},
						},
					},
				},
			},
		},
		existingApps: []v1alpha1.Application{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Application",
					APIVersion: "argoproj.io/v1alpha1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:            "app1",
					Namespace:       "namespace",
					ResourceVersion: "2",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Sources: []v1alpha1.ApplicationSource{
						{
							RepoURL: "https://git.example.com/test-org/test-repo.git",
							Helm: &v1alpha1.ApplicationSourceHelm{
								Values: "foo: bar",
								Parameters: []v1alpha1.HelmParameter{
									{Name: "hi", Value: "there"},
								},
							},
						},
					},
				},
			},
		},
		desiredApps: []v1alpha1.Application{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "app1",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Sources: []v1alpha1.ApplicationSource{
						{
							RepoURL: "https://git.example.com/test-org/test-repo.git",
							Helm: &v1alpha1.ApplicationSourceHelm{
								Values: "foo: bar",
							},
						},
					},
				},
			},
		},
		expected: []v1alpha1.Application{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Application",
					APIVersion: "argoproj.io/v1alpha1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      "app1",
					Namespace: "namespace",
					// This should not be updated, because reconciliation shouldn't modify the App.
					ResourceVersion: "2",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Sources: []v1alpha1.ApplicationSource{
						{
							RepoURL: "https://git.example.com/test-org/test-repo.git",
							Helm: &v1alpha1.ApplicationSourceHelm{
								Values: "foo: bar",
								Parameters: []v1alpha1.HelmParameter{
									// This existed only in the cluster, but it shouldn't be removed, because the field is ignored.
									{Name: "hi", Value: "there"},
								},
							},
						},
					},
				},
			},
		},
	}, {
		name: "Demonstrate limitation of MergePatch", // Maybe we can fix this in Argo CD 3.0: https://github.com/argoproj/argo-cd/issues/15975
		appSet: v1alpha1.ApplicationSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "name",
				Namespace: "namespace",
			},
			Spec: v1alpha1.ApplicationSetSpec{
				IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
					{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
				},
				Template: v1alpha1.ApplicationSetTemplate{
					Spec: v1alpha1.ApplicationSpec{
						Project: "project",
						Sources: []v1alpha1.ApplicationSource{
							{
								RepoURL: "https://git.example.com/test-org/test-repo.git",
								Helm: &v1alpha1.ApplicationSourceHelm{
									Values: "new: values",
								},
							},
						},
					},
				},
			},
		},
		existingApps: []v1alpha1.Application{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Application",
					APIVersion: "argoproj.io/v1alpha1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:            "app1",
					Namespace:       "namespace",
					ResourceVersion: "2",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Sources: []v1alpha1.ApplicationSource{
						{
							RepoURL: "https://git.example.com/test-org/test-repo.git",
							Helm: &v1alpha1.ApplicationSourceHelm{
								Values: "foo: bar",
								Parameters: []v1alpha1.HelmParameter{
									{Name: "hi", Value: "there"},
								},
							},
						},
					},
				},
			},
		},
		desiredApps: []v1alpha1.Application{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: "app1",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Sources: []v1alpha1.ApplicationSource{
						{
							RepoURL: "https://git.example.com/test-org/test-repo.git",
							Helm: &v1alpha1.ApplicationSourceHelm{
								Values: "new: values",
							},
						},
					},
				},
			},
		},
		expected: []v1alpha1.Application{
			{
				TypeMeta: metav1.TypeMeta{
					Kind:       "Application",
					APIVersion: "argoproj.io/v1alpha1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:            "app1",
					Namespace:       "namespace",
					ResourceVersion: "3",
				},
				Spec: v1alpha1.ApplicationSpec{
					Project: "project",
					Sources: []v1alpha1.ApplicationSource{
						{
							RepoURL: "https://git.example.com/test-org/test-repo.git",
							Helm: &v1alpha1.ApplicationSourceHelm{
								Values: "new: values",
								// The Parameters field got blown away, because the values field changed. MergePatch
								// doesn't merge list items, it replaces the whole list if an item changes.
								// If we eventually add a `name` field to Sources, we can use StrategicMergePatch.
							},
						},
					},
				},
			},
		},
	},
} {
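The last test case above documents why the Parameters list disappears: the controller applies changes as a JSON merge patch (via client.MergeFrom), and RFC 7386 merge patches replace arrays wholesale rather than merging their elements. A standalone illustration of that behaviour, using github.com/evanphx/json-patch only to demonstrate the semantics (the library choice here is not taken from the diff):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Live state: the Helm source carries parameters that exist only in the cluster.
	live := []byte(`{"sources":[{"helm":{"values":"foo: bar","parameters":[{"name":"hi","value":"there"}]}}]}`)
	// Desired state from the template: only helm.values differs, and it has no parameters.
	desired := []byte(`{"sources":[{"helm":{"values":"new: values"}}]}`)

	// A JSON merge patch cannot describe a change to one element of an array:
	// it carries the whole replacement array instead.
	patch, err := jsonpatch.CreateMergePatch(live, desired)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"sources":[{"helm":{"values":"new: values"}}]}

	// Applying it therefore drops the cluster-only parameters, which is exactly
	// what the "Demonstrate limitation of MergePatch" case above asserts.
	merged, err := jsonpatch.MergePatch(live, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}
```

As the in-code comment notes, a strategic merge patch keyed on a `name` field in Sources could merge list items instead of replacing the list.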
@@ -1014,7 +1302,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
			}, got)

			err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
			assert.Nil(t, err)
			assert.Equal(t, obj, *got)
		}
	})
@@ -1953,7 +2240,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
	}
}

-func TestReconcilerValidationErrorBehaviour(t *testing.T) {
+func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {

	scheme := runtime.NewScheme()
	err := v1alpha1.AddToScheme(scheme)
@@ -1961,9 +2248,8 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
	err = v1alpha1.AddToScheme(scheme)
	assert.Nil(t, err)

-	defaultProject := v1alpha1.AppProject{
-		ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
-		Spec:       v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://good-cluster"}}},
+	project := v1alpha1.AppProject{
+		ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},
	}
	appSet := v1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{
@@ -1976,22 +2262,22 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
				{
					List: &v1alpha1.ListGenerator{
						Elements: []apiextensionsv1.JSON{{
-							Raw: []byte(`{"cluster": "good-cluster","url": "https://good-cluster"}`),
+							Raw: []byte(`{"project": "good-project"}`),
						}, {
-							Raw: []byte(`{"cluster": "bad-cluster","url": "https://bad-cluster"}`),
+							Raw: []byte(`{"project": "bad-project"}`),
						}},
					},
				},
			},
			Template: v1alpha1.ApplicationSetTemplate{
				ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
-					Name:      "{{.cluster}}",
+					Name:      "{{.project}}",
					Namespace: "argocd",
				},
				Spec: v1alpha1.ApplicationSpec{
					Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"},
-					Project: "default",
-					Destination: v1alpha1.ApplicationDestination{Server: "{{.url}}"},
+					Project: "{{.project}}",
+					Destination: v1alpha1.ApplicationDestination{Server: "https://kubernetes.default.svc"},
				},
			},
		},
@@ -1999,17 +2285,9 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {

	kubeclientset := kubefake.NewSimpleClientset()
	argoDBMock := dbmocks.ArgoDB{}
-	argoObjs := []runtime.Object{&defaultProject}
+	argoObjs := []runtime.Object{&project}

	client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
-	goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
-	badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
-	argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
-	argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
-	argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
-		goodCluster,
-	}}, nil)

	r := ApplicationSetReconciler{
		Client: client,
		Scheme: scheme,
@@ -2041,12 +2319,12 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
	var app v1alpha1.Application

	// make sure good app got created
-	err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-cluster"}, &app)
+	err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-project"}, &app)
	assert.NoError(t, err)
-	assert.Equal(t, app.Name, "good-cluster")
+	assert.Equal(t, app.Name, "good-project")

	// make sure bad app was not created
-	err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-cluster"}, &app)
+	err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-project"}, &app)
	assert.Error(t, err)
}
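The renamed test drives the ApplicationSet template with Go-template placeholders such as `{{.project}}`, filled in from each List-generator element. A minimal sketch of just that substitution step in isolation (plain `text/template`; the element maps mirror the `Raw` JSON above, and the helper name is made up for illustration):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderName is a stand-in for the ApplicationSet template rendering step:
// one generator element (a map of parameters) is applied to a template string.
func renderName(tmpl string, params map[string]any) (string, error) {
	t, err := template.New("appset").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, params); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	for _, element := range []map[string]any{
		{"project": "good-project"},
		{"project": "bad-project"},
	} {
		name, _ := renderName("{{.project}}", element)
		fmt.Println(name) // good-project, then bad-project
	}
}
```

In the test above, the generated Application named good-project refers to a project that exists and is created, while bad-project refers to a missing project and is rejected by validation.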
@@ -5728,173 +6006,3 @@ func TestOwnsHandler(t *testing.T) {
		})
	}
}
-
-func Test_applyIgnoreDifferences(t *testing.T) {
-	appMeta := metav1.TypeMeta{
-		APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
-		Kind:       v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
-	}
-	testCases := []struct {
-		name              string
-		ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
-		foundApp          string
-		generatedApp      string
-		expectedApp       string
-	}{
-		{
-			name: "empty ignoreDifferences",
-			foundApp: `
-spec: {}`,
-			generatedApp: `
-spec: {}`,
-			expectedApp: `
-spec: {}`,
-		},
-		{
-			// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
-			name: "ignore target revision with jq",
-			ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
-				{JQPathExpressions: []string{".spec.source.targetRevision"}},
-			},
-			foundApp: `
-spec:
-  source:
-    targetRevision: foo`,
-			generatedApp: `
-spec:
-  source:
-    targetRevision: bar`,
-			expectedApp: `
-spec:
-  source:
-    targetRevision: foo`,
-		},
-		{
-			// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
-			name: "ignore helm parameter with jq",
-			ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
-				{JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
-			},
-			foundApp: `
-spec:
-  source:
-    helm:
-      parameters:
-      - name: image.tag
-        value: test
-      - name: another
-        value: value`,
-			generatedApp: `
-spec:
-  source:
-    helm:
-      parameters:
-      - name: image.tag
-        value: v1.0.0
-      - name: another
-        value: value`,
-			expectedApp: `
-spec:
-  source:
-    helm:
-      parameters:
-      - name: image.tag
-        value: test
-      - name: another
-        value: value`,
-		},
-		{
-			// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
-			name: "ignore auto-sync with jq",
-			ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
-				{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
-			},
-			foundApp: `
-spec:
-  syncPolicy:
-    retry:
-      limit: 5`,
-			generatedApp: `
-spec:
-  syncPolicy:
-    automated:
-      selfHeal: true
-    retry:
-      limit: 5`,
-			expectedApp: `
-spec:
-  syncPolicy:
-    retry:
-      limit: 5`,
-		},
-		{
-			// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
-			name: "ignore a one-off annotation with jq",
-			ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
-				{JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
-			},
-			foundApp: `
-metadata:
-  annotations:
-    foo.bar: baz
-    some.other: annotation`,
-			generatedApp: `
-metadata:
-  annotations:
-    some.other: annotation`,
-			expectedApp: `
-metadata:
-  annotations:
-    foo.bar: baz
-    some.other: annotation`,
-		},
-		{
-			// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
-			name: "ignore the source.plugin field with a json pointer",
-			ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
-				{JSONPointers: []string{"/spec/source/plugin"}},
-			},
-			foundApp: `
-spec:
-  source:
-    plugin:
-      parameters:
-      - name: url
-        string: https://example.com`,
-			generatedApp: `
-spec:
-  source:
-    plugin:
-      parameters:
-      - name: url
-        string: https://example.com/wrong`,
-			expectedApp: `
-spec:
-  source:
-    plugin:
-      parameters:
-      - name: url
-        string: https://example.com`,
-		},
-	}
-
-	for _, tc := range testCases {
-		tc := tc
-		t.Run(tc.name, func(t *testing.T) {
-			t.Parallel()
-			foundApp := v1alpha1.Application{TypeMeta: appMeta}
-			err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
-			require.NoError(t, err, tc.foundApp)
-			generatedApp := v1alpha1.Application{TypeMeta: appMeta}
-			err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
-			require.NoError(t, err, tc.generatedApp)
-			err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, generatedApp)
-			require.NoError(t, err)
-			jsonFound, err := json.Marshal(tc.foundApp)
-			require.NoError(t, err)
-			jsonExpected, err := json.Marshal(tc.expectedApp)
-			require.NoError(t, err)
-			assert.Equal(t, string(jsonExpected), string(jsonFound))
-		})
-	}
-}
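The relocated Test_applyIgnoreDifferences cases rely on jq-style path expressions such as `.spec.source.targetRevision` or `select(...)` filters to pick the fields to ignore. Argo CD evaluates these through its normalizers package; the sketch below uses github.com/itchyny/gojq directly, which is an assumption about the underlying implementation, purely to show what one of those expressions selects:

```go
package main

import (
	"fmt"

	"github.com/itchyny/gojq"
)

func main() {
	// One of the ignore expressions from the test cases above.
	query, err := gojq.Parse(`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`)
	if err != nil {
		panic(err)
	}

	// A stripped-down Application spec as generic JSON (the shape the normalizer works on).
	app := map[string]any{
		"spec": map[string]any{
			"sources": []any{
				map[string]any{
					"repoURL": "https://git.example.com/test-org/test-repo.git",
					"helm": map[string]any{
						"parameters": []any{map[string]any{"name": "hi", "value": "there"}},
					},
				},
			},
		},
	}

	iter := query.Run(app)
	for {
		v, ok := iter.Next()
		if !ok {
			break
		}
		fmt.Println(v) // the helm.parameters node that the ignore rule strips from the diff
	}
}
```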
@@ -56,12 +56,14 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
		return nil, EmptyAppSetGeneratorError
	}

+	noRevisionCache := appSet.RefreshRequired()
+
	var err error
	var res []map[string]interface{}
	if len(appSetGenerator.Git.Directories) != 0 {
-		res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
+		res, err = g.generateParamsForGitDirectories(appSetGenerator, noRevisionCache, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
	} else if len(appSetGenerator.Git.Files) != 0 {
-		res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
+		res, err = g.generateParamsForGitFiles(appSetGenerator, noRevisionCache, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
	} else {
		return nil, EmptyAppSetGeneratorError
	}
@@ -72,10 +74,10 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
	return res, nil
}

-func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
+func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, noRevisionCache bool, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {

	// Directories, not files
-	allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
+	allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, noRevisionCache)
	if err != nil {
		return nil, fmt.Errorf("error getting directories from repo: %w", err)
	}
@@ -98,12 +100,12 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
	return res, nil
}

-func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {
+func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, noRevisionCache bool, useGoTemplate bool, goTemplateOptions []string) ([]map[string]interface{}, error) {

	// Get all files that match the requested path string, removing duplicates
	allFiles := make(map[string][]byte)
	for _, requestedPath := range appSetGenerator.Git.Files {
-		files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path)
+		files, err := g.repos.GetFiles(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision, requestedPath.Path, noRevisionCache)
		if err != nil {
			return nil, err
		}
@@ -263,7 +263,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {

		argoCDServiceMock := mocks.Repos{}

-		argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
+		argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)

		var gitGenerator = NewGitGenerator(&argoCDServiceMock)
		applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
@@ -559,7 +559,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {

		argoCDServiceMock := mocks.Repos{}

-		argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
+		argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)

		var gitGenerator = NewGitGenerator(&argoCDServiceMock)
		applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
@@ -918,7 +918,7 @@ cluster:
		t.Parallel()

		argoCDServiceMock := mocks.Repos{}
-		argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+		argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
			Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)

		var gitGenerator = NewGitGenerator(&argoCDServiceMock)
@@ -1268,7 +1268,7 @@ cluster:
		t.Parallel()

		argoCDServiceMock := mocks.Repos{}
-		argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+		argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
			Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)

		var gitGenerator = NewGitGenerator(&argoCDServiceMock)

@@ -1108,7 +1108,7 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
	}

	repoServiceMock := &mocks.Repos{}
-	repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
+	repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
		"some/path.json": []byte("test: content"),
	}, nil)
	gitGenerator := NewGitGenerator(repoServiceMock)
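Because GetDirectories and GetFiles each gained a trailing noRevisionCache argument, every testify expectation above needs one more mock.Anything; an expectation registered with the old arity simply never matches the new call. A small sketch of that failure mode (the interface here is a cut-down stand-in, not the real mocks.Repos type):

```go
package demo

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
)

// fakeRepos is a minimal stand-in for the generated mocks.Repos type.
type fakeRepos struct{ mock.Mock }

func (m *fakeRepos) GetDirectories(ctx context.Context, repoURL, revision string, noRevisionCache bool) ([]string, error) {
	ret := m.Called(ctx, repoURL, revision, noRevisionCache)
	return ret.Get(0).([]string), ret.Error(1)
}

func TestArityMustMatch(t *testing.T) {
	repos := &fakeRepos{}
	// Four matchers for four arguments; with only three (the old signature),
	// testify would panic because the call no longer matches any expectation.
	repos.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return([]string{"apps/guestbook"}, nil)

	dirs, err := repos.GetDirectories(context.Background(), "https://example.com/repo.git", "HEAD", true)
	if err != nil || len(dirs) != 1 {
		t.Fatalf("unexpected result: %v %v", dirs, err)
	}
	repos.AssertExpectations(t)
}
```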
@@ -13,25 +13,25 @@ type Repos struct {
	mock.Mock
}

-// GetDirectories provides a mock function with given fields: ctx, repoURL, revision
-func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
-	ret := _m.Called(ctx, repoURL, revision)
+// GetDirectories provides a mock function with given fields: ctx, repoURL, revision, noRevisionCache
+func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error) {
+	ret := _m.Called(ctx, repoURL, revision, noRevisionCache)

	var r0 []string
	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]string, error)); ok {
-		return rf(ctx, repoURL, revision)
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) ([]string, error)); ok {
+		return rf(ctx, repoURL, revision, noRevisionCache)
	}
-	if rf, ok := ret.Get(0).(func(context.Context, string, string) []string); ok {
-		r0 = rf(ctx, repoURL, revision)
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, bool) []string); ok {
+		r0 = rf(ctx, repoURL, revision, noRevisionCache)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]string)
		}
	}

-	if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
-		r1 = rf(ctx, repoURL, revision)
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, bool) error); ok {
+		r1 = rf(ctx, repoURL, revision, noRevisionCache)
	} else {
		r1 = ret.Error(1)
	}
@@ -39,25 +39,25 @@ func (_m *Repos) GetDirectories(ctx context.Context, repoURL string, revision st
	return r0, r1
}

-// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern
-func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
-	ret := _m.Called(ctx, repoURL, revision, pattern)
+// GetFiles provides a mock function with given fields: ctx, repoURL, revision, pattern, noRevisionCache
+func (_m *Repos) GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error) {
+	ret := _m.Called(ctx, repoURL, revision, pattern, noRevisionCache)

	var r0 map[string][]byte
	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (map[string][]byte, error)); ok {
-		return rf(ctx, repoURL, revision, pattern)
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) (map[string][]byte, error)); ok {
+		return rf(ctx, repoURL, revision, pattern, noRevisionCache)
	}
-	if rf, ok := ret.Get(0).(func(context.Context, string, string, string) map[string][]byte); ok {
-		r0 = rf(ctx, repoURL, revision, pattern)
+	if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) map[string][]byte); ok {
+		r0 = rf(ctx, repoURL, revision, pattern, noRevisionCache)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[string][]byte)
		}
	}

-	if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
-		r1 = rf(ctx, repoURL, revision, pattern)
+	if rf, ok := ret.Get(1).(func(context.Context, string, string, string, bool) error); ok {
+		r1 = rf(ctx, repoURL, revision, pattern, noRevisionCache)
	} else {
		r1 = ret.Error(1)
	}
@@ -11,6 +11,8 @@ import (
	"github.com/argoproj/argo-cd/v2/util/io"
)

+//go:generate go run github.com/vektra/mockery/v2@v2.25.1 --name=RepositoryDB
+
// RepositoryDB Is a lean facade for ArgoDB,
// Using a lean interface makes it easier to test the functionality of the git generator
type RepositoryDB interface {
@@ -25,13 +27,15 @@ type argoCDService struct {
	newFileGlobbingEnabled bool
}

+//go:generate go run github.com/vektra/mockery/v2@v2.25.1 --name=Repos
+
type Repos interface {

	// GetFiles returns content of files (not directories) within the target repo
-	GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error)
+	GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error)

	// GetDirectories returns a list of directories (not files) within the target repo
-	GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error)
+	GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error)
}

func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclient.Clientset, newFileGlobbingEnabled bool) (Repos, error) {
@@ -43,7 +47,7 @@ func NewArgoCDService(db db.ArgoDB, submoduleEnabled bool, repoClientset apiclie
	}, nil
}

-func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
+func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision string, pattern string, noRevisionCache bool) (map[string][]byte, error) {
	repo, err := a.repositoriesDB.GetRepository(ctx, repoURL)
	if err != nil {
		return nil, fmt.Errorf("error in GetRepository: %w", err)
@@ -55,6 +59,7 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
		Revision: revision,
		Path: pattern,
		NewGitFileGlobbingEnabled: a.newFileGlobbingEnabled,
+		NoRevisionCache: noRevisionCache,
	}
	closer, client, err := a.repoServerClientSet.NewRepoServerClient()
	if err != nil {
@@ -69,7 +74,7 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
	return fileResponse.GetMap(), nil
}

-func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
+func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revision string, noRevisionCache bool) ([]string, error) {
	repo, err := a.repositoriesDB.GetRepository(ctx, repoURL)
	if err != nil {
		return nil, fmt.Errorf("error in GetRepository: %w", err)
@@ -79,6 +84,7 @@ func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revi
		Repo: repo,
		SubmoduleEnabled: a.submoduleEnabled,
		Revision: revision,
+		NoRevisionCache: noRevisionCache,
	}

	closer, client, err := a.repoServerClientSet.NewRepoServerClient()
@@ -25,9 +25,10 @@ func TestGetDirectories(t *testing.T) {
		repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient)
	}
	type args struct {
-		ctx      context.Context
-		repoURL  string
-		revision string
+		ctx             context.Context
+		repoURL         string
+		revision        string
+		noRevisionCache bool
	}
	tests := []struct {
		name string
@@ -88,11 +89,11 @@ func TestGetDirectories(t *testing.T) {
				submoduleEnabled: tt.fields.submoduleEnabled,
				repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient},
			}
-			got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision)
-			if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)) {
+			got, err := a.GetDirectories(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)
+			if !tt.wantErr(t, err, fmt.Sprintf("GetDirectories(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)) {
				return
			}
-			assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision)
+			assert.Equalf(t, tt.want, got, "GetDirectories(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.noRevisionCache)
		})
	}
}
@@ -105,10 +106,11 @@ func TestGetFiles(t *testing.T) {
		repoServerClientFuncs []func(*repo_mocks.RepoServerServiceClient)
	}
	type args struct {
-		ctx      context.Context
-		repoURL  string
-		revision string
-		pattern  string
+		ctx             context.Context
+		repoURL         string
+		revision        string
+		pattern         string
+		noRevisionCache bool
	}
	tests := []struct {
		name string
@@ -175,11 +177,11 @@ func TestGetFiles(t *testing.T) {
				submoduleEnabled: tt.fields.submoduleEnabled,
				repoServerClientSet: &repo_mocks.Clientset{RepoServerServiceClient: mockRepoClient},
			}
-			got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)
-			if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)) {
+			got, err := a.GetFiles(tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)
+			if !tt.wantErr(t, err, fmt.Sprintf("GetFiles(%v, %v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)) {
				return
			}
-			assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern)
+			assert.Equalf(t, tt.want, got, "GetFiles(%v, %v, %v, %v, %v)", tt.args.ctx, tt.args.repoURL, tt.args.revision, tt.args.pattern, tt.args.noRevisionCache)
		})
	}
}
@@ -2,18 +2,25 @@ package utils

import (
	"context"
+	"encoding/json"
	"fmt"

+	log "github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/argo"
+	argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
+	"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
)

// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
@@ -29,7 +36,7 @@ import (
// The MutateFn is called regardless of creating or updating an object.
//
// It returns the executed operation and an error.
-func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
+func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {

	key := client.ObjectKeyFromObject(obj)
	if err := c.Get(ctx, key, obj); err != nil {
@@ -45,15 +52,24 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
		return controllerutil.OperationResultCreated, nil
	}

-	existingObj := obj.DeepCopyObject()
-	existing, ok := existingObj.(client.Object)
-	if !ok {
-		panic(fmt.Errorf("existing object is not a client.Object"))
-	}
+	normalizedLive := obj.DeepCopy()

	// Mutate the live object to match the desired state.
	if err := mutate(f, key, obj); err != nil {
		return controllerutil.OperationResultNone, err
	}

+	// Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
+	// prevents those differences from appearing in the diff and therefore in the patch.
+	err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj, ignoreNormalizerOpts)
+	if err != nil {
+		return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
+	}
+
+	// Normalize to avoid diffing on unimportant differences.
+	normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
+	obj.Spec = *argo.NormalizeApplicationSpec(&obj.Spec)

	equality := conversion.EqualitiesOrDie(
		func(a, b resource.Quantity) bool {
			// Ignore formatting, only care that numeric value stayed the same.
@@ -79,16 +95,34 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
		},
	)

-	if equality.DeepEqual(existing, obj) {
+	if equality.DeepEqual(normalizedLive, obj) {
		return controllerutil.OperationResultNone, nil
	}

-	if err := c.Patch(ctx, obj, client.MergeFrom(existing)); err != nil {
+	patch := client.MergeFrom(normalizedLive)
+	if log.IsLevelEnabled(log.DebugLevel) {
+		LogPatch(logCtx, patch, obj)
+	}
+	if err := c.Patch(ctx, obj, patch); err != nil {
		return controllerutil.OperationResultNone, err
	}
	return controllerutil.OperationResultUpdated, nil
}

+func LogPatch(logCtx *log.Entry, patch client.Patch, obj *argov1alpha1.Application) {
+	patchBytes, err := patch.Data(obj)
+	if err != nil {
+		logCtx.Errorf("failed to generate patch: %v", err)
+	}
+	// Get the patch as a plain object so it is easier to work with in json logs.
+	var patchObj map[string]interface{}
+	err = json.Unmarshal(patchBytes, &patchObj)
+	if err != nil {
+		logCtx.Errorf("failed to unmarshal patch: %v", err)
+	}
+	logCtx.WithField("patch", patchObj).Debug("patching application")
+}

// mutate wraps a MutateFn and applies validation to its result
func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object) error {
	if err := f(); err != nil {
@@ -99,3 +133,71 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
|
||||
func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) error {
|
||||
if len(applicationSetIgnoreDifferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
generatedAppCopy := generatedApp.DeepCopy()
|
||||
diffConfig, err := argodiff.NewDiffConfigBuilder().
|
||||
WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false, ignoreNormalizerOpts).
|
||||
WithNoCache().
|
||||
Build()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to build diff config: %w", err)
|
||||
}
|
||||
unstructuredFound, err := appToUnstructured(found)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
|
||||
}
|
||||
unstructuredGenerated, err := appToUnstructured(generatedApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
|
||||
}
|
||||
result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to normalize application spec: %w", err)
|
||||
}
|
||||
if len(result.Lives) != 1 {
|
||||
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Lives))
|
||||
}
|
||||
foundJsonNormalized, err := json.Marshal(result.Lives[0].Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
|
||||
}
|
||||
foundNormalized := &argov1alpha1.Application{}
|
||||
err = json.Unmarshal(foundJsonNormalized, &foundNormalized)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal normalized app to json: %w", err)
|
||||
}
|
||||
if len(result.Targets) != 1 {
|
||||
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
|
||||
}
|
||||
foundNormalized.DeepCopyInto(found)
|
||||
generatedJsonNormalized, err := json.Marshal(result.Targets[0].Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
|
||||
}
|
||||
generatedAppNormalized := &argov1alpha1.Application{}
|
||||
err = json.Unmarshal(generatedJsonNormalized, &generatedAppNormalized)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
|
||||
}
|
||||
generatedAppNormalized.DeepCopyInto(generatedApp)
|
||||
// Prohibit jq queries from mutating silly things.
|
||||
generatedApp.TypeMeta = generatedAppCopy.TypeMeta
|
||||
generatedApp.Name = generatedAppCopy.Name
|
||||
generatedApp.Namespace = generatedAppCopy.Namespace
|
||||
generatedApp.Operation = generatedAppCopy.Operation
|
||||
return nil
|
||||
}
|
||||
|
||||
func appToUnstructured(app client.Object) (*unstructured.Unstructured, error) {
|
||||
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
|
||||
}
|
||||
return &unstructured.Unstructured{Object: u}, nil
|
||||
}
|
||||
|
||||
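For orientation, a minimal caller sketch follows; it is not part of this diff. It shows how the new CreateOrUpdate signature might be driven from an ApplicationSet reconciler. The names r.Client, appSet, generatedApp and the field appSet.Spec.IgnoreApplicationDifferences are assumptions for illustration, not code from these commits.

found := generatedApp.DeepCopy()
// Pass the ApplicationSet's ignore-differences rules and the JQ timeout options
// through; CreateOrUpdate strips the ignored fields before diffing and patching.
op, err := CreateOrUpdate(ctx, logCtx, r.Client,
	appSet.Spec.IgnoreApplicationDifferences, normalizers.IgnoreNormalizerOpts{},
	found, func() error {
		// Mutate the object fetched from the cluster to match the generated spec.
		found.Spec = generatedApp.Spec
		return nil
	})
if err != nil {
	logCtx.WithError(err).Error("failed to create or update Application")
} else if op != controllerutil.OperationResultNone {
	logCtx.Infof("Application %s %s", found.Name, op)
}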
235
applicationset/utils/createOrUpdate_test.go
Normal file
@@ -0,0 +1,235 @@
package utils

import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
)

func Test_applyIgnoreDifferences(t *testing.T) {
appMeta := metav1.TypeMeta{
APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
}
testCases := []struct {
name string
ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
foundApp string
generatedApp string
expectedApp string
}{
{
name: "empty ignoreDifferences",
foundApp: `
spec: {}`,
generatedApp: `
spec: {}`,
expectedApp: `
spec: {}`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "ignore target revision with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.source.targetRevision"}},
},
foundApp: `
spec:
source:
targetRevision: foo`,
generatedApp: `
spec:
source:
targetRevision: bar`,
expectedApp: `
spec:
source:
targetRevision: foo`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
name: "ignore helm parameter with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
},
foundApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: test
- name: another
value: value`,
generatedApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: v1.0.0
- name: another
value: value`,
expectedApp: `
spec:
source:
helm:
parameters:
- name: image.tag
value: test
- name: another
value: value`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
name: "ignore auto-sync in appset when it's not in the cluster with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
},
foundApp: `
spec:
syncPolicy:
retry:
limit: 5`,
generatedApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
expectedApp: `
spec:
syncPolicy:
retry:
limit: 5`,
},
{
name: "ignore auto-sync in the cluster when it's not in the appset with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
},
foundApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
generatedApp: `
spec:
syncPolicy:
retry:
limit: 5`,
expectedApp: `
spec:
syncPolicy:
automated:
selfHeal: true
retry:
limit: 5`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
name: "ignore a one-off annotation with jq",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
},
foundApp: `
metadata:
annotations:
foo.bar: baz
some.other: annotation`,
generatedApp: `
metadata:
annotations:
some.other: annotation`,
expectedApp: `
metadata:
annotations:
foo.bar: baz
some.other: annotation`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
name: "ignore the source.plugin field with a json pointer",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JSONPointers: []string{"/spec/source/plugin"}},
},
foundApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com`,
generatedApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com/wrong`,
expectedApp: `
spec:
source:
plugin:
parameters:
- name: url
string: https://example.com`,
},
{
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
name: "ignore parameters added to a multi-source app in the cluster",
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
},
foundApp: `
spec:
sources:
- repoURL: https://git.example.com/test-org/test-repo
helm:
parameters:
- name: test
value: hi`,
generatedApp: `
spec:
sources:
- repoURL: https://git.example.com/test-org/test-repo`,
expectedApp: `
spec:
sources:
- repoURL: https://git.example.com/test-org/test-repo
helm:
parameters:
- name: test
value: hi`,
},
}

for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
foundApp := v1alpha1.Application{TypeMeta: appMeta}
err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
require.NoError(t, err, tc.foundApp)
generatedApp := v1alpha1.Application{TypeMeta: appMeta}
err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
require.NoError(t, err, tc.generatedApp)
err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp, normalizers.IgnoreNormalizerOpts{})
require.NoError(t, err)
yamlFound, err := yaml.Marshal(tc.foundApp)
require.NoError(t, err)
yamlExpected, err := yaml.Marshal(tc.expectedApp)
require.NoError(t, err)
assert.Equal(t, string(yamlExpected), string(yamlFound))
})
}
}
71
applicationset/utils/template_functions.go
Normal file
@@ -0,0 +1,71 @@
package utils

import (
"regexp"
"strings"

"sigs.k8s.io/yaml"
)

// SanitizeName sanitizes the name in accordance with the below rules
// 1. contain no more than 253 characters
// 2. contain only lowercase alphanumeric characters, '-' or '.'
// 3. start and end with an alphanumeric character
func SanitizeName(name string) string {
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
maxDNSNameLength := 253

name = strings.ToLower(name)
name = invalidDNSNameChars.ReplaceAllString(name, "-")
if len(name) > maxDNSNameLength {
name = name[:maxDNSNameLength]
}

return strings.Trim(name, "-.")
}

// This has been copied from helm and may be removed as soon as it is retrofited in sprig
// toYAML takes an interface, marshals it to yaml, and returns a string. It will
// always return a string, even on marshal error (empty string).
//
// This is designed to be called from a template.
func toYAML(v interface{}) (string, error) {
data, err := yaml.Marshal(v)
if err != nil {
// Swallow errors inside of a template.
return "", err
}
return strings.TrimSuffix(string(data), "\n"), nil
}

// This has been copied from helm and may be removed as soon as it is retrofited in sprig
// fromYAML converts a YAML document into a map[string]interface{}.
//
// This is not a general-purpose YAML parser, and will not parse all valid
// YAML documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string into
// m["Error"] in the returned map.
func fromYAML(str string) (map[string]interface{}, error) {
m := map[string]interface{}{}

if err := yaml.Unmarshal([]byte(str), &m); err != nil {
return nil, err
}
return m, nil
}

// This has been copied from helm and may be removed as soon as it is retrofited in sprig
// fromYAMLArray converts a YAML array into a []interface{}.
//
// This is not a general-purpose YAML parser, and will not parse all valid
// YAML documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string as
// the first and only item in the returned array.
func fromYAMLArray(str string) ([]interface{}, error) {
a := []interface{}{}

if err := yaml.Unmarshal([]byte(str), &a); err != nil {
return nil, err
}
return a, nil
}
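Because these helpers are unexported, a quick check has to live in the same package. The sketch below is illustrative only, not part of this diff; it assumes a fmt import and relies on sigs.k8s.io/yaml marshalling through JSON, which sorts map keys.

// Rough expectations for the helpers above, in package utils.
func sketchTemplateHelpers() {
	fmt.Println(SanitizeName("My_App.Name--")) // "my-app.name": lowercased, '_' replaced with '-', trailing '-' trimmed

	if s, err := toYAML(map[string]interface{}{"image": "nginx", "replicas": 2}); err == nil {
		fmt.Println(s) // "image: nginx\nreplicas: 2" with the trailing newline trimmed
	}

	if m, err := fromYAML("hello: world"); err == nil {
		fmt.Println(m["hello"]) // "world"
	}
}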
@@ -32,6 +32,9 @@ func init() {
delete(sprigFuncMap, "expandenv")
delete(sprigFuncMap, "getHostByName")
sprigFuncMap["normalize"] = SanitizeName
sprigFuncMap["toYaml"] = toYAML
sprigFuncMap["fromYaml"] = fromYAML
sprigFuncMap["fromYamlArray"] = fromYAMLArray
}

type Renderer interface {
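To make the registration above concrete, here is a hedged sketch of rendering a goTemplate-style string through these functions. It is not taken from this diff; it assumes it runs inside package utils (where sprigFuncMap is defined) with text/template and bytes imported.

// Render a template that uses the newly registered normalize and toYaml helpers.
tmpl := template.Must(template.New("example").Funcs(template.FuncMap(sprigFuncMap)).Parse(
	"name: {{ normalize .name }}\nvalues: |\n{{ toYaml .values | indent 2 }}"))
var buf bytes.Buffer
_ = tmpl.Execute(&buf, map[string]interface{}{
	"name":   "My_App",
	"values": map[string]interface{}{"replicas": 2},
})
// buf.String() should come out roughly as:
// name: my-app
// values: |
//   replicas: 2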
@@ -431,23 +434,6 @@ func NormalizeBitbucketBasePath(basePath string) string {
return basePath
}

// SanitizeName sanitizes the name in accordance with the below rules
// 1. contain no more than 253 characters
// 2. contain only lowercase alphanumeric characters, '-' or '.'
// 3. start and end with an alphanumeric character
func SanitizeName(name string) string {
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
maxDNSNameLength := 253

name = strings.ToLower(name)
name = invalidDNSNameChars.ReplaceAllString(name, "-")
if len(name) > maxDNSNameLength {
name = name[:maxDNSNameLength]
}

return strings.Trim(name, "-.")
}

func getTlsConfigWithCACert(scmRootCAPath string) *tls.Config {

tlsConfig := &tls.Config{}
@@ -555,6 +555,64 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
templateOptions: []string{"missingkey=error"},
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
},
{
name: "toYaml",
fieldVal: `{{ toYaml . | indent 2 }}`,
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
params: map[string]interface{}{
"foo": map[string]interface{}{
"bar": map[string]interface{}{
"bool": true,
"number": 2,
"str": "Hello world",
},
},
},
},
{
name: "toYaml Error",
fieldVal: `{{ toYaml . | indent 2 }}`,
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: :1:3: executing \"\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
params: map[string]interface{}{
"foo": func(test *string) {
},
},
},
{
name: "fromYaml",
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
expectedVal: "world",
params: map[string]interface{}{
"value": "hello: world",
},
},
{
name: "fromYaml error",
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
expectedVal: "world",
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
params: map[string]interface{}{
"value": "non\n compliant\n yaml",
},
},
{
name: "fromYamlArray",
fieldVal: `{{ fromYamlArray .value | last }}`,
expectedVal: "bonjour tout le monde",
params: map[string]interface{}{
"value": "- hello world\n- bonjour tout le monde",
},
},
{
name: "fromYamlArray error",
fieldVal: `{{ fromYamlArray .value | last }}`,
expectedVal: "bonjour tout le monde",
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
params: map[string]interface{}{
"value": "non\n compliant\n yaml",
},
},
}

for _, test := range tests {
@@ -5089,7 +5089,7 @@
|
||||
}
|
||||
},
|
||||
"runtimeRawExtension": {
|
||||
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this:\n{\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
|
||||
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"raw": {
|
||||
@@ -5571,8 +5571,8 @@
|
||||
}
|
||||
},
|
||||
"v1ObjectReference": {
|
||||
"description": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
|
||||
"type": "object",
|
||||
"title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"type": "string",
|
||||
@@ -7396,6 +7396,7 @@
|
||||
"properties": {
|
||||
"elements": {
|
||||
"type": "array",
|
||||
"title": "+kubebuilder:validation:Optional",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1JSON"
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
@@ -45,28 +46,30 @@ const (
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSource func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
repoServerPlaintext bool
|
||||
repoServerStrictTLS bool
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
applicationNamespaces []string
|
||||
persistResourceHealth bool
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSource func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
repoServerPlaintext bool
|
||||
repoServerStrictTLS bool
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
applicationNamespaces []string
|
||||
persistResourceHealth bool
|
||||
shardingAlgorithm string
|
||||
enableDynamicClusterDistribution bool
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -139,7 +142,7 @@ func NewCommand() *cobra.Command {
|
||||
appController.InvalidateProjectsCache()
|
||||
}))
|
||||
kubectl := kubeutil.NewKubectl()
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm)
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
|
||||
errors.CheckError(err)
|
||||
appController, err = controller.NewApplicationController(
|
||||
namespace,
|
||||
@@ -159,6 +162,7 @@ func NewCommand() *cobra.Command {
|
||||
persistResourceHealth,
|
||||
clusterFilter,
|
||||
applicationNamespaces,
|
||||
ignoreNormalizerOpts,
|
||||
)
|
||||
errors.CheckError(err)
|
||||
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
|
||||
@@ -204,21 +208,28 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from")
|
||||
command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD")
|
||||
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
|
||||
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
|
||||
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string) sharding.ClusterFilterFunction {
|
||||
func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
|
||||
|
||||
var replicas int
|
||||
shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
|
||||
|
||||
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
appControllerDeployment, _ := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
|
||||
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
|
||||
|
||||
if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
|
||||
// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
|
||||
if err != nil && kubeerrors.IsNotFound(err) {
|
||||
appControllerDeployment = nil
|
||||
}
|
||||
|
||||
if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
|
||||
replicas = int(*appControllerDeployment.Spec.Replicas)
|
||||
} else {
|
||||
replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
@@ -228,7 +239,7 @@ func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.Se
|
||||
if replicas > 1 {
|
||||
// check for shard mapping using configmap if application-controller is a deployment
|
||||
// else use existing logic to infer shard from pod name if application-controller is a statefulset
|
||||
if appControllerDeployment != nil {
|
||||
if enableDynamicClusterDistribution && appControllerDeployment != nil {
|
||||
|
||||
var err error
|
||||
// retry 3 times if we find a conflict while updating shard mapping configMap.
|
||||
|
||||
@@ -55,6 +55,7 @@ func NewCommand() *cobra.Command {
|
||||
argocdRepoServerStrictTLS bool
|
||||
configMapName string
|
||||
secretName string
|
||||
applicationNamespaces []string
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: "controller",
|
||||
@@ -138,7 +139,7 @@ func NewCommand() *cobra.Command {
|
||||
log.Infof("serving metrics on port %d", metricsPort)
|
||||
log.Infof("loading configuration %d", metricsPort)
|
||||
|
||||
ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, appLabelSelector, registry, secretName, configMapName)
|
||||
ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, applicationNamespaces, appLabelSelector, registry, secretName, configMapName)
|
||||
err = ctrl.Init(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize controller: %w", err)
|
||||
@@ -161,5 +162,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
|
||||
command.Flags().StringVar(&secretName, "secret-name", "argocd-notifications-secret", "Set notifications Secret name")
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that this controller should send notifications for")
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -66,6 +66,7 @@ func NewCommand() *cobra.Command {
|
||||
streamedManifestMaxTarSize string
|
||||
streamedManifestMaxExtractedSize string
|
||||
helmManifestMaxExtractedSize string
|
||||
helmRegistryMaxIndexSize string
|
||||
disableManifestMaxExtractedSize bool
|
||||
)
|
||||
var command = cobra.Command{
|
||||
@@ -108,6 +109,9 @@ func NewCommand() *cobra.Command {
|
||||
helmManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(helmManifestMaxExtractedSize)
|
||||
errors.CheckError(err)
|
||||
|
||||
helmRegistryMaxIndexSizeQuantity, err := resource.ParseQuantity(helmRegistryMaxIndexSize)
|
||||
errors.CheckError(err)
|
||||
|
||||
askPassServer := askpass.NewServer()
|
||||
metricsServer := metrics.NewMetricsServer()
|
||||
cacheutil.CollectMetrics(redisClient, metricsServer)
|
||||
@@ -123,6 +127,7 @@ func NewCommand() *cobra.Command {
|
||||
StreamedManifestMaxExtractedSize: streamedManifestMaxExtractedSizeQuantity.ToDec().Value(),
|
||||
StreamedManifestMaxTarSize: streamedManifestMaxTarSizeQuantity.ToDec().Value(),
|
||||
HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(),
|
||||
HelmRegistryMaxIndexSize: helmRegistryMaxIndexSizeQuantity.ToDec().Value(),
|
||||
}, askPassServer)
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -204,6 +209,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&streamedManifestMaxTarSize, "streamed-manifest-max-tar-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE", "100M"), "Maximum size of streamed manifest archives")
|
||||
command.Flags().StringVar(&streamedManifestMaxExtractedSize, "streamed-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of streamed manifest archives when extracted")
|
||||
command.Flags().StringVar(&helmManifestMaxExtractedSize, "helm-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of helm manifest archives when extracted")
|
||||
command.Flags().StringVar(&helmRegistryMaxIndexSize, "helm-registry-max-index-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_INDEX_SIZE", "1G"), "Maximum size of registry index file")
|
||||
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
@@ -58,6 +59,7 @@ func NewCommand() *cobra.Command {
|
||||
repoServerAddress string
|
||||
dexServerAddress string
|
||||
disableAuth bool
|
||||
contentTypes string
|
||||
enableGZip bool
|
||||
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
|
||||
cacheSrc func() (*servercache.Cache, error)
|
||||
@@ -162,6 +164,11 @@ func NewCommand() *cobra.Command {
|
||||
baseHRef = rootPath
|
||||
}
|
||||
|
||||
var contentTypesList []string
|
||||
if contentTypes != "" {
|
||||
contentTypesList = strings.Split(contentTypes, ";")
|
||||
}
|
||||
|
||||
argoCDOpts := server.ArgoCDServerOpts{
|
||||
Insecure: insecure,
|
||||
ListenPort: listenPort,
|
||||
@@ -177,6 +184,7 @@ func NewCommand() *cobra.Command {
|
||||
DexServerAddr: dexServerAddress,
|
||||
DexTLSConfig: dexTlsConfig,
|
||||
DisableAuth: disableAuth,
|
||||
ContentTypes: contentTypesList,
|
||||
EnableGZip: enableGZip,
|
||||
TLSConfigCustomizer: tlsConfigCustomizer,
|
||||
Cache: cache,
|
||||
@@ -224,6 +232,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_SERVER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address")
|
||||
command.Flags().StringVar(&dexServerAddress, "dex-server", env.StringFromEnv("ARGOCD_SERVER_DEX_SERVER", common.DefaultDexServerAddr), "Dex server address")
|
||||
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
|
||||
command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", env.StringFromEnvOpts{AllowEmpty: true}), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
|
||||
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
|
||||
command.AddCommand(cli.NewVersionCmd(cliName))
|
||||
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
|
||||
|
||||
@@ -57,9 +57,10 @@ func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command.AddCommand(NewRepoCommand())
|
||||
command.AddCommand(NewImportCommand())
|
||||
command.AddCommand(NewExportCommand())
|
||||
command.AddCommand(NewDashboardCommand())
|
||||
command.AddCommand(NewDashboardCommand(clientOpts))
|
||||
command.AddCommand(NewNotificationsCommand())
|
||||
command.AddCommand(NewInitialPasswordCommand())
|
||||
command.AddCommand(NewRedisInitialPasswordCommand())
|
||||
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
|
||||
reposerverclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
@@ -228,11 +229,12 @@ func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error {
|
||||
|
||||
func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
selector string
|
||||
repoServerAddress string
|
||||
outputFormat string
|
||||
refresh bool
|
||||
clientConfig clientcmd.ClientConfig
|
||||
selector string
|
||||
repoServerAddress string
|
||||
outputFormat string
|
||||
refresh bool
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
)
|
||||
|
||||
var command = &cobra.Command{
|
||||
@@ -270,7 +272,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
|
||||
appClientset := appclientset.NewForConfigOrDie(cfg)
|
||||
kubeClientset := kubernetes.NewForConfigOrDie(cfg)
|
||||
result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache)
|
||||
result, err = reconcileApplications(ctx, kubeClientset, appClientset, namespace, repoServerClient, selector, newLiveStateCache, ignoreNormalizerOpts)
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
appClientset := appclientset.NewForConfigOrDie(cfg)
|
||||
@@ -285,6 +287,7 @@ func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
command.Flags().StringVar(&selector, "l", "", "Label selector")
|
||||
command.Flags().StringVar(&outputFormat, "o", "yaml", "Output format (yaml|json)")
|
||||
command.Flags().BoolVar(&refresh, "refresh", false, "If set to true then recalculates apps reconciliation")
|
||||
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
|
||||
|
||||
return command
|
||||
}
|
||||
@@ -334,6 +337,7 @@ func reconcileApplications(
|
||||
repoServerClient reposerverclient.Clientset,
|
||||
selector string,
|
||||
createLiveStateCache func(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache,
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
|
||||
) ([]appReconcileResult, error) {
|
||||
settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, namespace)
|
||||
argoDB := db.NewDB(namespace, settingsMgr, kubeClientset)
|
||||
@@ -374,7 +378,7 @@ func reconcileApplications(
|
||||
)
|
||||
|
||||
appStateManager := controller.NewAppStateManager(
|
||||
argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false)
|
||||
argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false, ignoreNormalizerOpts)
|
||||
|
||||
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
|
||||
if err != nil {
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
)
|
||||
@@ -113,6 +114,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {
|
||||
func(argoDB db.ArgoDB, appInformer cache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) statecache.LiveStateCache {
|
||||
return &liveStateCache
|
||||
},
|
||||
normalizers.IgnoreNormalizerOpts{},
|
||||
)
|
||||
|
||||
if !assert.NoError(t, err) {
|
||||
|
||||
@@ -3,7 +3,9 @@ package admin
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
|
||||
@@ -14,11 +16,12 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
)
|
||||
|
||||
func NewDashboardCommand() *cobra.Command {
|
||||
func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
port int
|
||||
address string
|
||||
compressionStr string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "dashboard",
|
||||
@@ -28,12 +31,13 @@ func NewDashboardCommand() *cobra.Command {
|
||||
|
||||
compression, err := cache.CompressionTypeFromString(compressionStr)
|
||||
errors.CheckError(err)
|
||||
errors.CheckError(headless.StartLocalServer(ctx, &argocdclient.ClientOptions{Core: true}, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression))
|
||||
clientOpts.Core = true
|
||||
errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression, clientConfig))
|
||||
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
|
||||
<-ctx.Done()
|
||||
},
|
||||
}
|
||||
initialize.InitCommand(cmd)
|
||||
clientConfig = cli.AddKubectlFlagsToSet(cmd.Flags())
|
||||
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
|
||||
cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address")
|
||||
cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
|
||||
|
||||
98
cmd/argocd/commands/admin/redis_initial_password.go
Normal file
@@ -0,0 +1,98 @@
package admin

import (
"context"
"crypto/rand"
"fmt"
"math/big"

"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/cli"
apierr "k8s.io/apimachinery/pkg/api/errors"

"github.com/argoproj/argo-cd/v2/util/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"

"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
)

const defaulRedisInitialPasswordSecretName = "argocd-redis"
const defaultResisInitialPasswordKey = "auth"

func generateRandomPassword() (string, error) {
const initialPasswordLength = 16
const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
randBytes := make([]byte, initialPasswordLength)
for i := 0; i < initialPasswordLength; i++ {
num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
if err != nil {
return "", err
}
randBytes[i] = letters[num.Int64()]
}
initialPassword := string(randBytes)
return initialPassword, nil
}

// NewRedisInitialPasswordCommand defines a new command to ensure Argo CD Redis password secret exists.
func NewRedisInitialPasswordCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
)
var command = cobra.Command{
Use: "redis-initial-password",
Short: "Ensure the Redis password exists, creating a new one if necessary.",
Run: func(c *cobra.Command, args []string) {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)

redisInitialPasswordSecretName := defaulRedisInitialPasswordSecretName
redisInitialPasswordKey := defaultResisInitialPasswordKey
fmt.Printf("Checking for initial Redis password in secret %s/%s at key %s. \n", namespace, redisInitialPasswordSecretName, redisInitialPasswordKey)

config, err := clientConfig.ClientConfig()
errors.CheckError(err)
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))

kubeClientset := kubernetes.NewForConfigOrDie(config)

randomPassword, err := generateRandomPassword()
errors.CheckError(err)

data := map[string][]byte{
redisInitialPasswordKey: []byte(randomPassword),
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: redisInitialPasswordSecretName,
Namespace: namespace,
},
Data: data,
Type: corev1.SecretTypeOpaque,
}
_, err = kubeClientset.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{})
if err != nil && !apierr.IsAlreadyExists(err) {
errors.CheckError(err)
}

fmt.Println("Argo CD Redis secret state confirmed: secret name argocd-redis.")
secret, err = kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), redisInitialPasswordSecretName, v1.GetOptions{})
errors.CheckError(err)

if _, ok := secret.Data[redisInitialPasswordKey]; ok {
fmt.Println("Password secret is configured properly.")
} else {
err := fmt.Errorf("key %s doesn't exist in secret %s. \n", redisInitialPasswordKey, redisInitialPasswordSecretName)
errors.CheckError(err)
}
},
}

clientConfig = cli.AddKubectlFlagsToCmd(&command)

return &command
}
@@ -432,7 +432,7 @@ argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argo
|
||||
// configurations. This requires access to live resources which is not the
|
||||
// purpose of this command. This will just apply jsonPointers and
|
||||
// jqPathExpressions configurations.
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides)
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides, normalizers.IgnoreNormalizerOpts{})
|
||||
errors.CheckError(err)
|
||||
|
||||
normalizedRes := res.DeepCopy()
|
||||
@@ -457,6 +457,9 @@ argocd admin settings resource-overrides ignore-differences ./deploy.yaml --argo
|
||||
}
|
||||
|
||||
func NewResourceIgnoreResourceUpdatesCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var (
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "ignore-resource-updates RESOURCE_YAML_PATH",
|
||||
Short: "Renders fields excluded from resource updates",
|
||||
@@ -478,7 +481,7 @@ argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml -
|
||||
return
|
||||
}
|
||||
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides)
|
||||
normalizer, err := normalizers.NewIgnoreNormalizer(nil, overrides, ignoreNormalizerOpts)
|
||||
errors.CheckError(err)
|
||||
|
||||
normalizedRes := res.DeepCopy()
|
||||
@@ -499,6 +502,7 @@ argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml -
|
||||
})
|
||||
},
|
||||
}
|
||||
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/repository"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/git"
|
||||
@@ -964,14 +965,15 @@ type objKeyLiveTarget struct {
|
||||
// NewApplicationDiffCommand returns a new instance of an `argocd app diff` command
|
||||
func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
refresh bool
|
||||
hardRefresh bool
|
||||
exitCode bool
|
||||
local string
|
||||
revision string
|
||||
localRepoRoot string
|
||||
serverSideGenerate bool
|
||||
localIncludes []string
|
||||
refresh bool
|
||||
hardRefresh bool
|
||||
exitCode bool
|
||||
local string
|
||||
revision string
|
||||
localRepoRoot string
|
||||
serverSideGenerate bool
|
||||
localIncludes []string
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
)
|
||||
shortDesc := "Perform a diff against the target and live state."
|
||||
var command = &cobra.Command{
|
||||
@@ -1031,13 +1033,14 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
defer argoio.Close(conn)
|
||||
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
|
||||
errors.CheckError(err)
|
||||
|
||||
diffOption.local = local
|
||||
diffOption.localRepoRoot = localRepoRoot
|
||||
diffOption.cluster = cluster
|
||||
}
|
||||
}
|
||||
proj := getProject(c, clientOpts, ctx, app.Spec.Project)
|
||||
- foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption)
+ foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
if foundDiffs && exitCode {
os.Exit(1)
}
@@ -1051,6 +1054,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing")
command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.")
+ command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
return command
}

@@ -1065,7 +1069,7 @@ type DifferenceOption struct {
}

// findandPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
- func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption) bool {
+ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) bool {
var foundDiffs bool
liveObjs, err := cmdutil.LiveObjects(resources.Items)
errors.CheckError(err)
@@ -1120,7 +1124,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
// compareOptions in the protobuf
ignoreAggregatedRoles := false
diffConfig, err := argodiff.NewDiffConfigBuilder().
- WithDiffSettings(app.Spec.IgnoreDifferences, overrides, ignoreAggregatedRoles).
+ WithDiffSettings(app.Spec.IgnoreDifferences, overrides, ignoreAggregatedRoles, ignoreNormalizerOpts).
WithTracking(argoSettings.AppLabelKey, argoSettings.TrackingMethod).
WithNoCache().
Build()
@@ -1613,6 +1617,8 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
diffChangesConfirm bool
projects []string
output string
appNamespace string
+ ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
)
var command = &cobra.Command{
Use: "sync [APPNAME... | -l selector | --project project-name]",
@@ -1837,7 +1843,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
fmt.Printf("====== Previewing differences between live and desired state of application %s ======\n", appQualifiedName)

proj := getProject(c, clientOpts, ctx, app.Spec.Project)
- foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption)
+ foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
if foundDiffs {
if !diffChangesConfirm {
yesno := cli.AskToProceed(fmt.Sprintf("Please review changes to application %s shown above. Do you want to continue the sync process? (y/n): ", appQualifiedName))
@@ -1895,6 +1901,8 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command.Flags().BoolVar(&diffChanges, "preview-changes", false, "Preview difference against the target and live state before syncing app and wait for user confirmation")
command.Flags().StringArrayVar(&projects, "project", []string{}, "Sync apps that belong to the specified projects. This option may be specified repeatedly.")
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only sync an application in namespace")
+ command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", normalizers.DefaultJQExecutionTimeout, "Set ignore normalizer JQ execution timeout")
return command
}
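The hunks above thread a new ignoreNormalizerOpts value from a CLI flag down into the diff config. A minimal, self-contained sketch of that flag wiring (using a stand-in struct rather than the real normalizers.IgnoreNormalizerOpts, and an assumed default of one second) could look like this:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

// jqOpts is a stand-in for IgnoreNormalizerOpts; the only field the CLI above
// exposes is the JQ execution timeout.
type jqOpts struct {
	JQExecutionTimeout time.Duration
}

func main() {
	var opts jqOpts
	fs := pflag.NewFlagSet("example", pflag.ExitOnError)
	// Mirrors the flag registered by the diff/sync commands above; the default
	// value here is an assumption, not the documented default.
	fs.DurationVar(&opts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout", 1*time.Second, "Set ignore normalizer JQ execution timeout")
	_ = fs.Parse([]string{"--ignore-normalizer-jq-execution-timeout", "5s"})
	fmt.Println(opts.JQExecutionTimeout) // 5s
}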
@@ -142,16 +142,25 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
}
defer io.Close(closer)
_, err = versionClient.Version(ctx, &empty.Empty{})
- return fmt.Errorf("failed to get version: %w", err)
+ if err != nil {
+ return fmt.Errorf("failed to get version: %w", err)
+ }
+ return nil
}

- // StartLocalServer allows executing command in a headless mode: on the fly starts Argo CD API server and
- // changes provided client options to use started API server port
- func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType) error {
- flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
- clientConfig := cli.AddKubectlFlagsToSet(flags)
+ // MaybeStartLocalServer allows executing command in a headless mode. If we're in core mode, starts the Argo CD API
+ // server on the fly and changes provided client options to use started API server port.
+ //
+ // If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
+ // not start the local server.
+ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType, clientConfig clientcmd.ClientConfig) error {
+ if clientConfig == nil {
+ flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
+ clientConfig = cli.AddKubectlFlagsToSet(flags)
+ }
startInProcessAPI := clientOpts.Core
if !startInProcessAPI {
+ // Core mode is enabled on client options. Check the local config to see if we should start the API server.
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
if err != nil {
return fmt.Errorf("error reading local config: %w", err)
@@ -161,9 +170,11 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
if err != nil {
return fmt.Errorf("error resolving context: %w", err)
}
+ // There was a local config file, so determine whether core mode is enabled per the config file.
startInProcessAPI = configCtx.Server.Core
}
}
+ // If we're in core mode, start the API server on the fly.
if !startInProcessAPI {
return nil
}
@@ -235,6 +246,7 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
if !cache2.WaitForCacheSync(ctx.Done(), srv.Initialized) {
log.Fatal("Timed out waiting for project cache to sync")
}

tries := 5
for i := 0; i < tries; i++ {
err = testAPI(ctx, clientOpts)
@@ -243,7 +255,10 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
}
time.Sleep(time.Second)
}
- return fmt.Errorf("all retries failed: %w", err)
+ if err != nil {
+ return fmt.Errorf("all retries failed: %w", err)
+ }
+ return nil
}

// NewClientOrDie creates a new API client from a set of config options, or fails fatally if the new client creation fails.
@@ -251,7 +266,9 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
ctx := c.Context()

ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
- err := StartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone)
+ // If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
+ // If we're not in core mode, this function call will do nothing.
+ err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone, nil)
if err != nil {
log.Fatal(err)
}
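The rename to MaybeStartLocalServer makes the core-mode decision explicit: the in-process API server is started only when core mode is requested via client options or via the resolved local config context. A minimal sketch of that decision, with simplified stand-in types instead of the real apiclient.ClientOptions and config context, assuming this reading of the hunk:

package main

import "fmt"

// clientOptions and configContext are simplified stand-ins for the real types
// used by MaybeStartLocalServer above.
type clientOptions struct{ Core bool }
type configContext struct{ ServerCore bool }

// shouldStartInProcessAPI returns true only when core mode is enabled either
// on the client options or in the resolved local config context.
func shouldStartInProcessAPI(opts clientOptions, cfg *configContext) bool {
	if opts.Core {
		return true
	}
	if cfg != nil {
		return cfg.ServerCore
	}
	return false
}

func main() {
	fmt.Println(shouldStartInProcessAPI(clientOptions{Core: false}, &configContext{ServerCore: true})) // true
}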
@@ -120,11 +120,16 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
logCtx.Error(err.Error())
return strings.TrimSuffix(output, "\n"), err
}

+ logCtx = logCtx.WithFields(log.Fields{
+ "stderr": stderr.String(),
+ "command": command,
+ })
if len(output) == 0 {
- log.WithFields(log.Fields{
- "stderr": stderr.String(),
- "command": command,
- }).Warn("Plugin command returned zero output")
+ logCtx.Warn("Plugin command returned zero output")
+ } else {
+ // Log stderr even on successful commands to help develop plugins
+ logCtx.Info("Plugin command successful")
}

return strings.TrimSuffix(output, "\n"), nil
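The change above builds one field-scoped log entry and reuses it for both the empty-output warning and the success message, so stderr and the command are always attached. A small runnable sketch of the same pattern with logrus (values are illustrative):

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	stderr := "warning: deprecated flag"
	command := []string{"helm", "template", "."}
	output := ""

	// Attach the fields once, then log through the same entry on every path.
	logCtx := log.WithFields(log.Fields{
		"stderr":  stderr,
		"command": command,
	})
	if len(output) == 0 {
		logCtx.Warn("Plugin command returned zero output")
	} else {
		logCtx.Info("Plugin command successful")
	}
}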
@@ -224,6 +224,8 @@ const (
EnvControllerShard = "ARGOCD_CONTROLLER_SHARD"
// EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin
EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM"
//EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA)
EnvEnableDynamicClusterDistribution = "ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION"
// EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection
EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM"
// EnvGithubAppCredsExpirationDuration controls the caching of Github app credentials. This value is in minutes (default: 60)
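The hunk above exposes the sharding algorithm through the ARGOCD_CONTROLLER_SHARDING_ALGORITHM environment variable. A hedged sketch of how a consumer might read it, assuming "legacy" as the fallback and "legacy"/"round-robin" as the accepted values (the exact validation is not shown in this diff):

package main

import (
	"fmt"
	"os"
)

// getShardingAlgorithm reads the variable introduced above and falls back to
// "legacy" for unset or unrecognized values (assumed behavior).
func getShardingAlgorithm() string {
	algo := os.Getenv("ARGOCD_CONTROLLER_SHARDING_ALGORITHM")
	switch algo {
	case "legacy", "round-robin":
		return algo
	default:
		return "legacy"
	}
}

func main() {
	fmt.Println(getShardingAlgorithm())
}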
@@ -3,6 +3,7 @@ package controller
import (
"context"
"encoding/json"
+ goerrors "errors"
"fmt"
"math"
"net/http"
@@ -53,6 +54,7 @@ import (
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/util/argo"
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
+ "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
"github.com/argoproj/argo-cd/v2/util/env"

appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
@@ -67,7 +69,7 @@ import (

const (
updateOperationStateTimeout = 1 * time.Second
- defaultDeploymentInformerResyncDuration = 10
+ defaultDeploymentInformerResyncDuration = 10 * time.Second
// orphanedIndex contains application which monitor orphaned resources by namespace
orphanedIndex = "orphaned"
)
@@ -126,6 +128,7 @@ type ApplicationController struct {
clusterFilter func(cluster *appv1.Cluster) bool
projByNameCache sync.Map
applicationNamespaces []string
+ ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
}

// NewApplicationController creates new instance of ApplicationController.
@@ -147,6 +150,7 @@ func NewApplicationController(
persistResourceHealth bool,
clusterFilter func(cluster *appv1.Cluster) bool,
applicationNamespaces []string,
+ ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
) (*ApplicationController, error) {
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v", appResyncPeriod, appHardResyncPeriod)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -172,6 +176,7 @@ func NewApplicationController(
clusterFilter: clusterFilter,
projByNameCache: sync.Map{},
applicationNamespaces: applicationNamespaces,
+ ignoreNormalizerOpts: ignoreNormalizerOpts,
}
if kubectlParallelismLimit > 0 {
ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
@@ -208,14 +213,18 @@ func NewApplicationController(
},
})

- factory := informers.NewSharedInformerFactory(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration)
+ factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
deploymentInformer := factory.Apps().V1().Deployments()

readinessHealthCheck := func(r *http.Request) error {
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
- if !kubeerrors.IsNotFound(err) {
- return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
+ if err != nil {
+ if kubeerrors.IsNotFound(err) {
+ appControllerDeployment = nil
+ } else {
+ return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
+ }
+ }
}
if appControllerDeployment != nil {
if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
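The factory change above scopes the controller's Deployment informer to its own namespace instead of watching cluster-wide. A small runnable sketch of that client-go pattern, using a fake clientset and an assumed "argocd" namespace:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// Scoping the shared informer factory to one namespace keeps the Deployment
	// informer from listing and watching Deployments across the whole cluster.
	factory := informers.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Second,
		informers.WithNamespace("argocd"),
	)
	deployments := factory.Apps().V1().Deployments()
	fmt.Println(deployments.Informer() != nil)
}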
@@ -242,7 +251,7 @@ func NewApplicationController(
}
}
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter, argo.NewResourceTracking())
- appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth)
+ appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, ignoreNormalizerOpts)
ctrl.appInformer = appInformer
ctrl.appLister = appLister
ctrl.projInformer = projInformer
@@ -693,7 +702,7 @@ func (ctrl *ApplicationController) hideSecretData(app *appv1.Application, compar
return nil, fmt.Errorf("error getting cluster cache: %s", err)
}
diffConfig, err := argodiff.NewDiffConfigBuilder().
- WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
+ WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles, ctrl.ignoreNormalizerOpts).
WithTracking(appLabelKey, trackingMethod).
WithNoCache().
WithLogger(logutils.NewLogrusLogger(logutils.NewWithCurrentConfig())).
@@ -1783,6 +1792,13 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
_, err := argo.SetAppOperation(appIf, app.Name, &op)
setOpTime := time.Since(start)
if err != nil {
+ if goerrors.Is(err, argo.ErrAnotherOperationInProgress) {
+ // skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
+ // it is safe to skip auto-sync because it is already running
+ logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
+ return nil, 0
+ }
+
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
}
@@ -38,6 +38,7 @@ import (
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/v2/test"
+ "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/settings"
@@ -123,6 +124,7 @@ func newFakeController(data *fakeData) *ApplicationController {
true,
nil,
data.applicationNamespaces,
+ normalizers.IgnoreNormalizerOpts{},
)
if err != nil {
panic(err)
@@ -799,7 +801,7 @@ func TestNormalizeApplication(t *testing.T) {
normalized := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
- if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
+ if string(patchAction.GetPatch()) == `{"spec":{"project":"default"},"status":{"sync":{"comparedTo":{"destination":{},"source":{"repoURL":""}}}}}` {
normalized = true
}
}
@@ -821,7 +823,7 @@ func TestNormalizeApplication(t *testing.T) {
normalized := false
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
if patchAction, ok := action.(kubetesting.PatchAction); ok {
- if string(patchAction.GetPatch()) == `{"spec":{"project":"default"}}` {
+ if string(patchAction.GetPatch()) == `{"spec":{"project":"default"},"status":{"sync":{"comparedTo":{"destination":{},"source":{"repoURL":""}}}}}` {
normalized = true
}
}
controller/cache/cache.go (20 changed lines)
@@ -32,6 +32,7 @@ import (
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/argo"
+ "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/env"
logutils "github.com/argoproj/argo-cd/v2/util/log"
@@ -196,14 +197,15 @@ type cacheSettings struct {
}

type liveStateCache struct {
db db.ArgoDB
appInformer cache.SharedIndexInformer
onObjectUpdated ObjectUpdatedHandler
kubectl kube.Kubectl
settingsMgr *settings.SettingsManager
metricsServer *metrics.MetricsServer
clusterFilter func(cluster *appv1.Cluster) bool
resourceTracking argo.ResourceTracking
+ ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts

clusters map[string]clustercache.ClusterCache
cacheSettings cacheSettings
@@ -486,7 +488,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
gvk := un.GroupVersionKind()

if cacheSettings.ignoreResourceUpdatesEnabled && shouldHashManifest(appName, gvk) {
- hash, err := generateManifestHash(un, nil, cacheSettings.resourceOverrides)
+ hash, err := generateManifestHash(un, nil, cacheSettings.resourceOverrides, c.ignoreNormalizerOpts)
if err != nil {
log.Errorf("Failed to generate manifest hash: %v", err)
} else {
controller/cache/info.go (4 changed lines)
@@ -390,8 +390,8 @@ func populateHostNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {
}
}

- func generateManifestHash(un *unstructured.Unstructured, ignores []v1alpha1.ResourceIgnoreDifferences, overrides map[string]v1alpha1.ResourceOverride) (string, error) {
- normalizer, err := normalizers.NewIgnoreNormalizer(ignores, overrides)
+ func generateManifestHash(un *unstructured.Unstructured, ignores []v1alpha1.ResourceIgnoreDifferences, overrides map[string]v1alpha1.ResourceOverride, opts normalizers.IgnoreNormalizerOpts) (string, error) {
+ normalizer, err := normalizers.NewIgnoreNormalizer(ignores, overrides, opts)
if err != nil {
return "", fmt.Errorf("error creating normalizer: %w", err)
}
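generateManifestHash above normalizes a manifest (dropping ignored differences) before hashing it, so that updates touching only ignored fields do not change the hash. A simplified, self-contained stand-in for that idea, with top-level field removal instead of the real JSON-pointer/JQ normalizer and sha256 as an assumed hash function:

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// hashManifest strips ignored top-level fields and hashes the remainder; the
// real normalizer supports JSON pointers and JQ path expressions.
func hashManifest(obj map[string]interface{}, ignoredTopLevelFields []string) (string, error) {
	normalized := make(map[string]interface{}, len(obj))
	for k, v := range obj {
		normalized[k] = v
	}
	for _, f := range ignoredTopLevelFields {
		delete(normalized, f)
	}
	data, err := json.Marshal(normalized)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(data)), nil
}

func main() {
	manifest := map[string]interface{}{"kind": "ConfigMap", "status": map[string]interface{}{"observed": 1}}
	h, _ := hashManifest(manifest, []string{"status"})
	fmt.Println(h)
}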
controller/cache/info_test.go (3 changed lines)
@@ -16,6 +16,7 @@ import (
"sigs.k8s.io/yaml"

"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
+ "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
)

func strToUnstructured(jsonStr string) *unstructured.Unstructured {
@@ -749,7 +750,7 @@ func TestManifestHash(t *testing.T) {

expected := hash(data)

- hash, err := generateManifestHash(manifest, ignores, nil)
+ hash, err := generateManifestHash(manifest, ignores, nil, normalizers.IgnoreNormalizerOpts{})
assert.Equal(t, expected, hash)
assert.Nil(t, err)
}
@@ -4,11 +4,12 @@ import (
"context"
"encoding/json"
"fmt"
- v1 "k8s.io/api/core/v1"
"reflect"
"strings"
"time"

+ v1 "k8s.io/api/core/v1"
+
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/sync"
@@ -32,6 +33,7 @@ import (
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/util/argo"
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
+ "github.com/argoproj/argo-cd/v2/util/argo/normalizers"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/gpg"
@@ -105,8 +107,13 @@ type appStateManager struct {
statusRefreshTimeout time.Duration
resourceTracking argo.ResourceTracking
persistResourceHealth bool
+ ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
}

+ // getRepoObjs will generate the manifests for the given application delegating the
+ // task to the repo-server. It returns the list of generated manifests as unstructured
+ // objects. It also returns the full response from all calls to the repo server as the
+ // second argument.
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {

ts := stats.NewTimingStats()
@@ -391,6 +398,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
now := metav1.Now()

var manifestInfos []*apiclient.ManifestResponse
+ targetNsExists := false

if len(localManifests) == 0 {
// If the length of revisions is not same as the length of sources,
@@ -453,6 +461,13 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
LastTransitionTime: &now,
})
}
+
+ // If we reach this path, this means that a namespace has been both defined in Git, as well in the
+ // application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead
+ // of what is present in managedNamespaceMetadata.
+ if isManagedNamespace(targetObj, app) {
+ targetNsExists = true
+ }
}
ts.AddCheckpoint("dedup_ms")

@@ -511,7 +526,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a
// namespace which we can compare the live namespace with. For that, we'll do the same as is done in
// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.
- if isManagedNamespace(liveObj, app) {
+ //
+ // targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the
+ // targetObjs array.
+ if isManagedNamespace(liveObj, app) && !targetNsExists {
nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}
managedNs, err := kubeutil.ToUnstructured(nsSpec)

@@ -547,21 +565,16 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
}

- // restore comparison using cached diff result if previous comparison was performed for the same revision
- revisionChanged := len(manifestInfos) != len(sources) || !reflect.DeepEqual(app.Status.Sync.Revisions, manifestRevisions)
- specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences})
-
- _, refreshRequested := app.IsRefreshRequested()
- noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) || specChanged || revisionChanged
+ useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, logCtx)

diffConfigBuilder := argodiff.NewDiffConfigBuilder().
- WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
+ WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles, m.ignoreNormalizerOpts).
WithTracking(appLabelKey, string(trackingMethod))

- if noCache {
- diffConfigBuilder.WithNoCache()
+ if useDiffCache {
+ diffConfigBuilder.WithCache(m.cache, app.InstanceName(m.namespace))
} else {
- diffConfigBuilder.WithCache(m.cache, app.GetName())
+ diffConfigBuilder.WithNoCache()
}

gvkParser, err := m.getGVKParser(app.Spec.Destination.Server)
@@ -768,6 +781,46 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
return &compRes
}

+ // useDiffCache will determine if the diff should be calculated based
+ // on the existing live state cache or not.
+ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sources []v1alpha1.ApplicationSource, app *v1alpha1.Application, manifestRevisions []string, statusRefreshTimeout time.Duration, log *log.Entry) bool {
+ if noCache {
+ log.WithField("useDiffCache", "false").Debug("noCache is true")
+ return false
+ }
+ _, refreshRequested := app.IsRefreshRequested()
+ if refreshRequested {
+ log.WithField("useDiffCache", "false").Debug("refreshRequested")
+ return false
+ }
+ if app.Status.Expired(statusRefreshTimeout) {
+ log.WithField("useDiffCache", "false").Debug("app.status.expired")
+ return false
+ }
+
+ if len(manifestInfos) != len(sources) {
+ log.WithField("useDiffCache", "false").Debug("manifestInfos len != sources len")
+ return false
+ }
+
+ revisionChanged := !reflect.DeepEqual(app.Status.GetRevisions(), manifestRevisions)
+ if revisionChanged {
+ log.WithField("useDiffCache", "false").Debug("revisionChanged")
+ return false
+ }
+
+ currentSpec := app.BuildComparedToStatus()
+ specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
+ if specChanged {
+ log.WithField("useDiffCache", "false").Debug("specChanged")
+ return false
+ }
+
+ log.WithField("useDiffCache", "true").Debug("using diff cache")
+ return true
+ }
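useDiffCache above replaces the previous ad-hoc noCache expression with an ordered chain of invalidation checks, each logging the reason the cache is rejected. A small self-contained sketch of the same pattern (the condition names here are illustrative, not the controller's full list):

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// canReuseCachedDiff walks a list of invalidation conditions in order, logs the
// first one that disqualifies the cache, and returns true only if none apply.
func canReuseCachedDiff(noCache, refreshRequested, revisionChanged, specChanged bool, log *logrus.Entry) bool {
	reasons := []struct {
		hit  bool
		name string
	}{
		{noCache, "noCache is true"},
		{refreshRequested, "refreshRequested"},
		{revisionChanged, "revisionChanged"},
		{specChanged, "specChanged"},
	}
	for _, r := range reasons {
		if r.hit {
			log.WithField("useDiffCache", "false").Debug(r.name)
			return false
		}
	}
	log.WithField("useDiffCache", "true").Debug("using diff cache")
	return true
}

func main() {
	entry := logrus.NewEntry(logrus.New())
	fmt.Println(canReuseCachedDiff(false, false, true, false, entry)) // false: revision changed
}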
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error {
var nextID int64
if len(app.Status.History) > 0 {
@@ -821,6 +874,7 @@ func NewAppStateManager(
statusRefreshTimeout time.Duration,
resourceTracking argo.ResourceTracking,
persistResourceHealth bool,
+ ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
) AppStateManager {
return &appStateManager{
liveStateCache: liveStateCache,
@@ -836,6 +890,7 @@ func NewAppStateManager(
statusRefreshTimeout: statusRefreshTimeout,
resourceTracking: resourceTracking,
persistResourceHealth: persistResourceHealth,
+ ignoreNormalizerOpts: ignoreNormalizerOpts,
}
}
@@ -10,6 +10,9 @@ import (
|
||||
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
. "github.com/argoproj/gitops-engine/pkg/utils/testing"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/sirupsen/logrus"
|
||||
logrustest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
@@ -89,6 +92,122 @@ func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// TestCompareAppStateNamespaceMetadataDiffersToManifest tests comparison when managed namespace metadata differs to live and manifest ns
|
||||
func TestCompareAppStateNamespaceMetadataDiffersToManifest(t *testing.T) {
|
||||
ns := NewNamespace()
|
||||
ns.SetName(test.FakeDestNamespace)
|
||||
ns.SetNamespace(test.FakeDestNamespace)
|
||||
ns.SetAnnotations(map[string]string{"bar": "bat"})
|
||||
|
||||
app := newFakeApp()
|
||||
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
app.Status.OperationState = &argoappv1.OperationState{
|
||||
SyncResult: &argoappv1.SyncOperationResult{},
|
||||
}
|
||||
|
||||
liveNs := ns.DeepCopy()
|
||||
liveNs.SetAnnotations(nil)
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{toJSON(t, liveNs)},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.GetResourceKey(ns): ns,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Len(t, compRes.resources, 1)
|
||||
assert.Len(t, compRes.managedResources, 1)
|
||||
assert.NotNil(t, compRes.diffResultList)
|
||||
assert.Len(t, compRes.diffResultList.Diffs, 1)
|
||||
|
||||
result := NewNamespace()
|
||||
assert.NoError(t, json.Unmarshal(compRes.diffResultList.Diffs[0].PredictedLive, result))
|
||||
|
||||
labels := result.GetLabels()
|
||||
delete(labels, "kubernetes.io/metadata.name")
|
||||
|
||||
assert.Equal(t, map[string]string{}, labels)
|
||||
// Manifests override definitions in managedNamespaceMetadata
|
||||
assert.Equal(t, map[string]string{"bar": "bat"}, result.GetAnnotations())
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// TestCompareAppStateNamespaceMetadata tests comparison when managed namespace metadata differs to live
|
||||
func TestCompareAppStateNamespaceMetadata(t *testing.T) {
|
||||
ns := NewNamespace()
|
||||
ns.SetName(test.FakeDestNamespace)
|
||||
ns.SetNamespace(test.FakeDestNamespace)
|
||||
ns.SetAnnotations(map[string]string{"bar": "bat"})
|
||||
|
||||
app := newFakeApp()
|
||||
app.Spec.SyncPolicy.ManagedNamespaceMetadata = &argoappv1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
app.Status.OperationState = &argoappv1.OperationState{
|
||||
SyncResult: &argoappv1.SyncOperationResult{},
|
||||
}
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.GetResourceKey(ns): ns,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Len(t, compRes.resources, 1)
|
||||
assert.Len(t, compRes.managedResources, 1)
|
||||
assert.NotNil(t, compRes.diffResultList)
|
||||
assert.Len(t, compRes.diffResultList.Diffs, 1)
|
||||
|
||||
result := NewNamespace()
|
||||
assert.NoError(t, json.Unmarshal(compRes.diffResultList.Diffs[0].PredictedLive, result))
|
||||
|
||||
labels := result.GetLabels()
|
||||
delete(labels, "kubernetes.io/metadata.name")
|
||||
|
||||
assert.Equal(t, map[string]string{"foo": "bar"}, labels)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true", "bar": "bat", "foo": "bar"}, result.GetAnnotations())
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// TestCompareAppStateNamespaceMetadataIsTheSame tests comparison when managed namespace metadata is the same
|
||||
func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
@@ -1223,3 +1342,252 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
}
|
||||
|
||||
func TestUseDiffCache(t *testing.T) {
|
||||
type fixture struct {
|
||||
testName string
|
||||
noCache bool
|
||||
manifestInfos []*apiclient.ManifestResponse
|
||||
sources []argoappv1.ApplicationSource
|
||||
app *argoappv1.Application
|
||||
manifestRevisions []string
|
||||
statusRefreshTimeout time.Duration
|
||||
expectedUseCache bool
|
||||
}
|
||||
|
||||
manifestInfos := func(revision string) []*apiclient.ManifestResponse {
|
||||
return []*apiclient.ManifestResponse{
|
||||
{
|
||||
Manifests: []string{
|
||||
"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"labels\":{\"app.kubernetes.io/instance\":\"httpbin\"},\"name\":\"httpbin-svc\",\"namespace\":\"httpbin\"},\"spec\":{\"ports\":[{\"name\":\"http-port\",\"port\":7777,\"targetPort\":80},{\"name\":\"test\",\"port\":333}],\"selector\":{\"app\":\"httpbin\"}}}",
|
||||
"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"labels\":{\"app.kubernetes.io/instance\":\"httpbin\"},\"name\":\"httpbin-deployment\",\"namespace\":\"httpbin\"},\"spec\":{\"replicas\":2,\"selector\":{\"matchLabels\":{\"app\":\"httpbin\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"httpbin\"}},\"spec\":{\"containers\":[{\"image\":\"kennethreitz/httpbin\",\"imagePullPolicy\":\"Always\",\"name\":\"httpbin\",\"ports\":[{\"containerPort\":80}]}]}}}}",
|
||||
},
|
||||
Namespace: "",
|
||||
Server: "",
|
||||
Revision: revision,
|
||||
SourceType: "Kustomize",
|
||||
VerifyResult: "",
|
||||
},
|
||||
}
|
||||
}
|
||||
sources := func() []argoappv1.ApplicationSource {
|
||||
return []argoappv1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
app := func(namespace string, revision string, refresh bool, a *argoappv1.Application) *argoappv1.Application {
|
||||
app := &argoappv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "httpbin",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Source: &argoappv1.ApplicationSource{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: argoappv1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "httpbin",
|
||||
},
|
||||
Project: "default",
|
||||
SyncPolicy: &argoappv1.SyncPolicy{
|
||||
SyncOptions: []string{
|
||||
"CreateNamespace=true",
|
||||
"ServerSideApply=true",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: argoappv1.ApplicationStatus{
|
||||
Resources: []argoappv1.ResourceStatus{},
|
||||
Sync: argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Source: argoappv1.ApplicationSource{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: argoappv1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "httpbin",
|
||||
},
|
||||
},
|
||||
Revision: revision,
|
||||
Revisions: []string{},
|
||||
},
|
||||
ReconciledAt: &metav1.Time{
|
||||
Time: time.Now().Add(-time.Hour),
|
||||
},
|
||||
},
|
||||
}
|
||||
if refresh {
|
||||
annotations := make(map[string]string)
|
||||
annotations[argoappv1.AnnotationKeyRefresh] = string(argoappv1.RefreshTypeNormal)
|
||||
app.SetAnnotations(annotations)
|
||||
}
|
||||
if a != nil {
|
||||
err := mergo.Merge(app, a, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue)
|
||||
if err != nil {
|
||||
t.Fatalf("error merging app: %s", err)
|
||||
}
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
cases := []fixture{
|
||||
{
|
||||
testName: "will use diff cache",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, nil),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: true,
|
||||
},
|
||||
{
|
||||
testName: "will use diff cache for multisource",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "", false, &argoappv1.Application{
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Source: nil,
|
||||
Sources: argoappv1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "multisource repo1",
|
||||
},
|
||||
{
|
||||
RepoURL: "multisource repo2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: argoappv1.ApplicationStatus{
|
||||
Resources: []argoappv1.ResourceStatus{},
|
||||
Sync: argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Source: argoappv1.ApplicationSource{},
|
||||
Sources: argoappv1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "multisource repo1",
|
||||
},
|
||||
{
|
||||
RepoURL: "multisource repo2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Revisions: []string{"rev1", "rev2"},
|
||||
},
|
||||
ReconciledAt: &metav1.Time{
|
||||
Time: time.Now().Add(-time.Hour),
|
||||
},
|
||||
},
|
||||
}),
|
||||
manifestRevisions: []string{"rev1", "rev2"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: true,
|
||||
},
|
||||
{
|
||||
testName: "will return false if nocache is true",
|
||||
noCache: true,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, nil),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: false,
|
||||
},
|
||||
{
|
||||
testName: "will return false if requested refresh",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", true, nil),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: false,
|
||||
},
|
||||
{
|
||||
testName: "will return false if status expired",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, nil),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Minute,
|
||||
expectedUseCache: false,
|
||||
},
|
||||
{
|
||||
testName: "will return false if there is a new revision",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, nil),
|
||||
manifestRevisions: []string{"rev2"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: false,
|
||||
},
|
||||
{
|
||||
testName: "will return false if app spec repo changed",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, &argoappv1.Application{
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Source: &argoappv1.ApplicationSource{
|
||||
RepoURL: "new-repo",
|
||||
},
|
||||
},
|
||||
}),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: false,
|
||||
},
|
||||
{
|
||||
testName: "will return false if app spec IgnoreDifferences changed",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, &argoappv1.Application{
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
IgnoreDifferences: []argoappv1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "app/v1",
|
||||
Kind: "application",
|
||||
Name: "httpbin",
|
||||
Namespace: "httpbin",
|
||||
JQPathExpressions: []string{"."},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
// Given
|
||||
t.Parallel()
|
||||
logger, _ := logrustest.NewNullLogger()
|
||||
log := logrus.NewEntry(logger)
|
||||
|
||||
// When
|
||||
useDiffCache := useDiffCache(tc.noCache, tc.manifestInfos, tc.sources, tc.app, tc.manifestRevisions, tc.statusRefreshTimeout, log)
|
||||
|
||||
// Then
|
||||
assert.Equal(t, useDiffCache, tc.expectedUseCache)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
@@ -10,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
cdcommon "github.com/argoproj/argo-cd/v2/common"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/sync"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/kubectl/pkg/util/openapi"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
@@ -370,11 +371,10 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
}
}

- // normalizeTargetResources will apply the diff normalization in all live and target resources.
- // Then it calculates the merge patch between the normalized live and the current live resources.
- // Finally it applies the merge patch in the normalized target resources. This is done to ensure
- // that target resources have the same ignored diff fields values from live ones to avoid them to
- // be applied in the cluster. Returns the list of normalized target resources.
+ // normalizeTargetResources modifies target resources to ensure ignored fields are not touched during synchronization:
+ // - applies normalization to the target resources based on the live resources
+ // - copies ignored fields from the matching live resources: apply normalizer to the live resource,
+ // calculates the patch performed by normalizer and applies the patch to the target resource
func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructured, error) {
// normalize live and target resources
normalized, err := diff.Normalize(cr.reconciliationResult.Live, cr.reconciliationResult.Target, cr.diffConfig)
@@ -393,94 +393,35 @@ func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructure
patchedTargets = append(patchedTargets, originalTarget)
continue
}
- // calculate targetPatch between normalized and target resource
- targetPatch, err := getMergePatch(normalizedTarget, originalTarget)
+
+ var lookupPatchMeta *strategicpatch.PatchMetaFromStruct
+ versionedObject, err := scheme.Scheme.New(normalizedTarget.GroupVersionKind())
+ if err == nil {
+ meta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
+ if err != nil {
+ return nil, err
+ }
+ lookupPatchMeta = &meta
+ }
+
+ livePatch, err := getMergePatch(normalized.Lives[idx], live, lookupPatchMeta)
if err != nil {
return nil, err
}
-
- // check if there is a patch to apply. An empty patch is identified by a '{}' string.
- if len(targetPatch) > 2 {
- livePatch, err := getMergePatch(normalized.Lives[idx], live)
- if err != nil {
- return nil, err
- }
- // generate a minimal patch that uses the fields from targetPatch (template)
- // with livePatch values
- patch, err := compilePatch(targetPatch, livePatch)
- if err != nil {
- return nil, err
- }
- normalizedTarget, err = applyMergePatch(normalizedTarget, patch)
- if err != nil {
- return nil, err
- }
- } else {
- // if there is no patch just use the original target
- normalizedTarget = originalTarget
+ normalizedTarget, err = applyMergePatch(normalizedTarget, livePatch, versionedObject)
+ if err != nil {
+ return nil, err
}

patchedTargets = append(patchedTargets, normalizedTarget)
}
return patchedTargets, nil
}

- // compilePatch will generate a patch using the fields from templatePatch with
- // the values from valuePatch.
- func compilePatch(templatePatch, valuePatch []byte) ([]byte, error) {
- templateMap := make(map[string]interface{})
- err := json.Unmarshal(templatePatch, &templateMap)
- if err != nil {
- return nil, err
- }
- valueMap := make(map[string]interface{})
- err = json.Unmarshal(valuePatch, &valueMap)
- if err != nil {
- return nil, err
- }
- resultMap := intersectMap(templateMap, valueMap)
- return json.Marshal(resultMap)
- }
-
- // intersectMap will return map with the fields intersection from the 2 provided
- // maps populated with the valueMap values.
- func intersectMap(templateMap, valueMap map[string]interface{}) map[string]interface{} {
- result := make(map[string]interface{})
- for k, v := range templateMap {
- if innerTMap, ok := v.(map[string]interface{}); ok {
- if innerVMap, ok := valueMap[k].(map[string]interface{}); ok {
- result[k] = intersectMap(innerTMap, innerVMap)
- }
- } else if innerTSlice, ok := v.([]interface{}); ok {
- if innerVSlice, ok := valueMap[k].([]interface{}); ok {
- items := []interface{}{}
- for idx, innerTSliceValue := range innerTSlice {
- if idx < len(innerVSlice) {
- if tSliceValueMap, ok := innerTSliceValue.(map[string]interface{}); ok {
- if vSliceValueMap, ok := innerVSlice[idx].(map[string]interface{}); ok {
- item := intersectMap(tSliceValueMap, vSliceValueMap)
- items = append(items, item)
- }
- } else {
- items = append(items, innerVSlice[idx])
- }
- }
- }
- if len(items) > 0 {
- result[k] = items
- }
- }
- } else {
- if _, ok := valueMap[k]; ok {
- result[k] = valueMap[k]
- }
- }
- }
- return result
- }
-
// getMergePatch calculates and returns the patch between the original and the
// modified unstructured objects.
- func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error) {
+ func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMeta *strategicpatch.PatchMetaFromStruct) ([]byte, error) {
originalJSON, err := original.MarshalJSON()
if err != nil {
return nil, err
@@ -489,20 +430,30 @@ func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error
if err != nil {
return nil, err
}
+ if lookupPatchMeta != nil {
+ return strategicpatch.CreateThreeWayMergePatch(modifiedJSON, modifiedJSON, originalJSON, lookupPatchMeta, true)
+ }
+
return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
}

// applyMergePatch will apply the given patch in the obj and return the patched
// unstructured object.
- func applyMergePatch(obj *unstructured.Unstructured, patch []byte) (*unstructured.Unstructured, error) {
+ func applyMergePatch(obj *unstructured.Unstructured, patch []byte, versionedObject interface{}) (*unstructured.Unstructured, error) {
originalJSON, err := obj.MarshalJSON()
if err != nil {
return nil, err
}
- patchedJSON, err := jsonpatch.MergePatch(originalJSON, patch)
+ var patchedJSON []byte
+ if versionedObject == nil {
+ patchedJSON, err = jsonpatch.MergePatch(originalJSON, patch)
+ } else {
+ patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patch, versionedObject)
+ }
if err != nil {
return nil, err
}

patchedObj := &unstructured.Unstructured{}
_, _, err = unstructured.UnstructuredJSONScheme.Decode(patchedJSON, nil, patchedObj)
if err != nil {
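The applyMergePatch change above switches to a strategic merge patch whenever a registered Go type exists for the object, and only falls back to a plain JSON merge patch otherwise. The practical difference shows up on lists with merge keys: strategic merge updates the matching entry, while a JSON merge patch replaces the whole list. A small sketch of that contrast (manifests are illustrative):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"spec":{"template":{"spec":{"containers":[{"name":"app","image":"alpine:2"},{"name":"sidecar","image":"envoy:1"}]}}}}`)
	patch := []byte(`{"spec":{"template":{"spec":{"containers":[{"name":"app","image":"alpine:3"}]}}}}`)

	// Strategic merge patch knows containers merge on "name", so only the
	// matching entry is updated and the sidecar survives.
	smp, err := strategicpatch.StrategicMergePatch(original, patch, &appsv1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(smp))

	// A plain JSON merge patch replaces the whole containers list, dropping the
	// sidecar; this is the fallback path for types without a registered struct.
	jmp, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(jmp))
}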
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
)
|
||||
|
||||
func TestPersistRevisionHistory(t *testing.T) {
|
||||
@@ -261,7 +262,7 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
setup := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture {
|
||||
t.Helper()
|
||||
dc, err := diff.NewDiffConfigBuilder().
|
||||
WithDiffSettings(ignores, nil, true).
|
||||
WithDiffSettings(ignores, nil, true, normalizers.IgnoreNormalizerOpts{}).
|
||||
WithNoCache().
|
||||
Build()
|
||||
require.NoError(t, err)
|
||||
@@ -386,3 +387,207 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
assert.Equal(t, 2, len(containers))
|
||||
})
|
||||
}
|
||||
|
||||
func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
type fixture struct {
|
||||
comparisonResult *comparisonResult
|
||||
}
|
||||
setupHttpProxy := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture {
|
||||
t.Helper()
|
||||
dc, err := diff.NewDiffConfigBuilder().
|
||||
WithDiffSettings(ignores, nil, true, normalizers.IgnoreNormalizerOpts{}).
|
||||
WithNoCache().
|
||||
Build()
|
||||
require.NoError(t, err)
|
||||
live := test.YamlToUnstructured(testdata.LiveHTTPProxy)
|
||||
target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
|
||||
return &fixture{
|
||||
&comparisonResult{
|
||||
reconciliationResult: sync.ReconciliationResult{
|
||||
Live: []*unstructured.Unstructured{live},
|
||||
Target: []*unstructured.Unstructured{target},
|
||||
},
|
||||
diffConfig: dc,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("will properly ignore nested fields within arrays", func(t *testing.T) {
|
||||
// given
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "projectcontour.io",
|
||||
Kind: "HTTPProxy",
|
||||
JQPathExpressions: []string{".spec.routes[]"},
|
||||
//JSONPointers: []string{"/spec/routes"},
|
||||
},
|
||||
}
|
||||
f := setupHttpProxy(t, ignores)
|
||||
target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
// when
|
||||
patchedTargets, err := normalizeTargetResources(f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(f.comparisonResult.reconciliationResult.Live))
|
||||
require.Equal(t, 1, len(f.comparisonResult.reconciliationResult.Target))
|
||||
require.Equal(t, 1, len(patchedTargets))
|
||||
|
||||
// live should have 1 entry
|
||||
require.Equal(t, 1, len(dig[[]any](f.comparisonResult.reconciliationResult.Live[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors"})))
|
||||
// assert some arbitrary field to show `entries[0]` is not an empty object
|
||||
require.Equal(t, "sample-header", dig[string](f.comparisonResult.reconciliationResult.Live[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries", 0, "requestHeader", "headerName"}))
|
||||
|
||||
// target has 2 entries
|
||||
require.Equal(t, 2, len(dig[[]any](f.comparisonResult.reconciliationResult.Target[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries"})))
|
||||
// assert some arbitrary field to show `entries[0]` is not an empty object
|
||||
require.Equal(t, "sample-header", dig[string](f.comparisonResult.reconciliationResult.Target[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries", 0, "requestHeaderValueMatch", "headers", 0, "name"}))
|
||||
|
||||
// It should be *1* entries in the array
|
||||
require.Equal(t, 1, len(dig[[]any](patchedTargets[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors"})))
|
||||
// and it should NOT equal an empty object
|
||||
require.Len(t, dig[any](patchedTargets[0].Object, []interface{}{"spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries", 0}), 1)
|
||||
|
||||
})
|
||||
t.Run("will correctly set array entries if new entries have been added", func(t *testing.T) {
|
||||
// given
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
JQPathExpressions: []string{".spec.template.spec.containers[].env[] | select(.name == \"SOME_ENV_VAR\")"},
|
||||
},
|
||||
}
|
||||
f := setupHttpProxy(t, ignores)
|
||||
live := test.YamlToUnstructured(testdata.LiveDeploymentEnvVarsYaml)
|
||||
target := test.YamlToUnstructured(testdata.TargetDeploymentEnvVarsYaml)
|
||||
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(targets))
|
||||
containers, ok, err := unstructured.NestedSlice(targets[0].Object, "spec", "template", "spec", "containers")
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, 1, len(containers))
|
||||
|
||||
ports := containers[0].(map[string]interface{})["ports"].([]interface{})
|
||||
assert.Equal(t, 1, len(ports))
|
||||
|
||||
env := containers[0].(map[string]interface{})["env"].([]interface{})
|
||||
assert.Equal(t, 3, len(env))
|
||||
|
||||
first := env[0]
|
||||
second := env[1]
|
||||
third := env[2]
|
||||
|
||||
// Currently the defined order at this time is the insertion order of the target manifest.
|
||||
assert.Equal(t, "SOME_ENV_VAR", first.(map[string]interface{})["name"])
|
||||
assert.Equal(t, "some_value", first.(map[string]interface{})["value"])
|
||||
|
||||
assert.Equal(t, "SOME_OTHER_ENV_VAR", second.(map[string]interface{})["name"])
|
||||
assert.Equal(t, "some_other_value", second.(map[string]interface{})["value"])
|
||||
|
||||
assert.Equal(t, "YET_ANOTHER_ENV_VAR", third.(map[string]interface{})["name"])
|
||||
assert.Equal(t, "yet_another_value", third.(map[string]interface{})["value"])
|
||||
})
|
||||
|
||||
t.Run("ignore-deployment-image-replicas-changes-additive", func(t *testing.T) {
|
||||
// given
|
||||
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
JSONPointers: []string{"/spec/replicas"},
|
||||
}, {
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
JQPathExpressions: []string{".spec.template.spec.containers[].image"},
|
||||
},
|
||||
}
|
||||
f := setupHttpProxy(t, ignores)
|
||||
live := test.YamlToUnstructured(testdata.MinimalImageReplicaDeploymentYaml)
|
||||
target := test.YamlToUnstructured(testdata.AdditionalImageReplicaDeploymentYaml)
|
||||
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(targets))
|
||||
metadata, ok, err := unstructured.NestedMap(targets[0].Object, "metadata")
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
labels, ok := metadata["labels"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, 2, len(labels))
|
||||
assert.Equal(t, "web", labels["appProcess"])
|
||||
|
||||
spec, ok, err := unstructured.NestedMap(targets[0].Object, "spec")
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
assert.Equal(t, int64(1), spec["replicas"])
|
||||
|
||||
template, ok := spec["template"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
|
||||
tMetadata, ok := template["metadata"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
tLabels, ok := tMetadata["labels"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, 2, len(tLabels))
|
||||
assert.Equal(t, "web", tLabels["appProcess"])
|
||||
|
||||
tSpec, ok := template["spec"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
containers, ok, err := unstructured.NestedSlice(tSpec, "containers")
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, 1, len(containers))
|
||||
|
||||
first := containers[0].(map[string]interface{})
|
||||
assert.Equal(t, "alpine:3", first["image"])
|
||||
|
||||
resources, ok := first["resources"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
requests, ok := resources["requests"].(map[string]interface{})
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "400m", requests["cpu"])
|
||||
|
||||
env, ok, err := unstructured.NestedSlice(first, "env")
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, 1, len(env))
|
||||
|
||||
env0 := env[0].(map[string]interface{})
|
||||
assert.Equal(t, "EV", env0["name"])
|
||||
assert.Equal(t, "here", env0["value"])
|
||||
})
|
||||
}
|
||||
|
||||
func dig[T any](obj interface{}, path []interface{}) T {
|
||||
i := obj
|
||||
|
||||
for _, segment := range path {
|
||||
switch segment.(type) {
|
||||
case int:
|
||||
i = i.([]interface{})[segment.(int)]
|
||||
case string:
|
||||
i = i.(map[string]interface{})[segment.(string)]
|
||||
default:
|
||||
panic("invalid path for object")
|
||||
}
|
||||
}
|
||||
|
||||
return i.(T)
|
||||
}
|
||||
|
||||
28
controller/testdata/additional-image-replicas-deployment.yaml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: client
|
||||
appProcess: web
|
||||
name: client
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: client
|
||||
strategy: {}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: client
|
||||
appProcess: web
|
||||
spec:
|
||||
containers:
|
||||
- image: alpine:2
|
||||
name: alpine
|
||||
resources:
|
||||
requests:
|
||||
cpu: 400m
|
||||
env:
|
||||
- name: EV
|
||||
value: here
|
||||
18
controller/testdata/data.go
vendored
@@ -11,4 +11,22 @@ var (
|
||||
|
||||
//go:embed target-deployment-new-entries.yaml
|
||||
TargetDeploymentNewEntries string
|
||||
|
||||
//go:embed live-httpproxy.yaml
|
||||
LiveHTTPProxy string
|
||||
|
||||
//go:embed target-httpproxy.yaml
|
||||
TargetHTTPProxy string
|
||||
|
||||
//go:embed live-deployment-env-vars.yaml
|
||||
LiveDeploymentEnvVarsYaml string
|
||||
|
||||
//go:embed target-deployment-env-vars.yaml
|
||||
TargetDeploymentEnvVarsYaml string
|
||||
|
||||
//go:embed minimal-image-replicas-deployment.yaml
|
||||
MinimalImageReplicaDeploymentYaml string
|
||||
|
||||
//go:embed additional-image-replicas-deployment.yaml
|
||||
AdditionalImageReplicaDeploymentYaml string
|
||||
)
|
||||
|
||||
177
controller/testdata/live-deployment-env-vars.yaml
vendored
Normal file
@@ -0,0 +1,177 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui'
|
||||
deployment.kubernetes.io/revision: '9'
|
||||
iksm-version: '2.0'
|
||||
kubectl.kubernetes.io/last-applied-configuration: >
|
||||
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}}
|
||||
creationTimestamp: '2022-01-05T15:45:21Z'
|
||||
generation: 119
|
||||
managedFields:
|
||||
- apiVersion: apps/v1
|
||||
fieldsType: FieldsV1
|
||||
fieldsV1:
|
||||
'f:metadata':
|
||||
'f:annotations':
|
||||
'f:iksm-version': {}
|
||||
manager: janitor
|
||||
operation: Apply
|
||||
time: '2022-01-06T18:21:04Z'
|
||||
- apiVersion: apps/v1
|
||||
fieldsType: FieldsV1
|
||||
fieldsV1:
|
||||
'f:metadata':
|
||||
'f:annotations':
|
||||
.: {}
|
||||
'f:argocd.argoproj.io/tracking-id': {}
|
||||
'f:kubectl.kubernetes.io/last-applied-configuration': {}
|
||||
'f:spec':
|
||||
'f:progressDeadlineSeconds': {}
|
||||
'f:replicas': {}
|
||||
'f:revisionHistoryLimit': {}
|
||||
'f:selector': {}
|
||||
'f:strategy':
|
||||
'f:rollingUpdate':
|
||||
.: {}
|
||||
'f:maxSurge': {}
|
||||
'f:maxUnavailable': {}
|
||||
'f:type': {}
|
||||
'f:template':
|
||||
'f:metadata':
|
||||
'f:labels':
|
||||
.: {}
|
||||
'f:app': {}
|
||||
'f:spec':
|
||||
'f:containers':
|
||||
'k:{"name":"guestbook-ui"}':
|
||||
.: {}
|
||||
'f:env':
|
||||
.: {}
|
||||
'k:{"name":"SOME_ENV_VAR"}':
|
||||
.: {}
|
||||
'f:name': {}
|
||||
'f:value': {}
|
||||
'f:image': {}
|
||||
'f:imagePullPolicy': {}
|
||||
'f:name': {}
|
||||
'f:ports':
|
||||
.: {}
|
||||
'k:{"containerPort":80,"protocol":"TCP"}':
|
||||
.: {}
|
||||
'f:containerPort': {}
|
||||
'f:protocol': {}
|
||||
'f:resources':
|
||||
.: {}
|
||||
'f:requests':
|
||||
.: {}
|
||||
'f:cpu': {}
|
||||
'f:memory': {}
|
||||
'f:terminationMessagePath': {}
|
||||
'f:terminationMessagePolicy': {}
|
||||
'f:dnsPolicy': {}
|
||||
'f:restartPolicy': {}
|
||||
'f:schedulerName': {}
|
||||
'f:securityContext': {}
|
||||
'f:terminationGracePeriodSeconds': {}
|
||||
manager: argocd
|
||||
operation: Update
|
||||
time: '2022-01-06T15:04:15Z'
|
||||
- apiVersion: apps/v1
|
||||
fieldsType: FieldsV1
|
||||
fieldsV1:
|
||||
'f:metadata':
|
||||
'f:annotations':
|
||||
'f:deployment.kubernetes.io/revision': {}
|
||||
'f:status':
|
||||
'f:availableReplicas': {}
|
||||
'f:conditions':
|
||||
.: {}
|
||||
'k:{"type":"Available"}':
|
||||
.: {}
|
||||
'f:lastTransitionTime': {}
|
||||
'f:lastUpdateTime': {}
|
||||
'f:message': {}
|
||||
'f:reason': {}
|
||||
'f:status': {}
|
||||
'f:type': {}
|
||||
'k:{"type":"Progressing"}':
|
||||
.: {}
|
||||
'f:lastTransitionTime': {}
|
||||
'f:lastUpdateTime': {}
|
||||
'f:message': {}
|
||||
'f:reason': {}
|
||||
'f:status': {}
|
||||
'f:type': {}
|
||||
'f:observedGeneration': {}
|
||||
'f:readyReplicas': {}
|
||||
'f:replicas': {}
|
||||
'f:updatedReplicas': {}
|
||||
manager: kube-controller-manager
|
||||
operation: Update
|
||||
time: '2022-01-06T18:15:14Z'
|
||||
name: kustomize-guestbook-ui
|
||||
namespace: default
|
||||
resourceVersion: '8289211'
|
||||
uid: ef253575-ce44-4c5e-84ad-16e81d0df6eb
|
||||
spec:
|
||||
progressDeadlineSeconds: 600
|
||||
replicas: 4
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 25%
|
||||
maxUnavailable: 25%
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: SOME_ENV_VAR
|
||||
value: some_value
|
||||
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 100Mi
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
status:
|
||||
availableReplicas: 4
|
||||
conditions:
|
||||
- lastTransitionTime: '2022-01-05T22:20:37Z'
|
||||
lastUpdateTime: '2022-01-05T22:43:47Z'
|
||||
message: >-
|
||||
ReplicaSet "kustomize-guestbook-ui-6549d54677" has successfully
|
||||
progressed.
|
||||
reason: NewReplicaSetAvailable
|
||||
status: 'True'
|
||||
type: Progressing
|
||||
- lastTransitionTime: '2022-01-06T18:15:14Z'
|
||||
lastUpdateTime: '2022-01-06T18:15:14Z'
|
||||
message: Deployment has minimum availability.
|
||||
reason: MinimumReplicasAvailable
|
||||
status: 'True'
|
||||
type: Available
|
||||
observedGeneration: 119
|
||||
readyReplicas: 4
|
||||
replicas: 4
|
||||
updatedReplicas: 4
|
||||
14
controller/testdata/live-httpproxy.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: projectcontour.io/v1
|
||||
kind: HTTPProxy
|
||||
metadata:
|
||||
name: my-http-proxy
|
||||
namespace: default
|
||||
spec:
|
||||
routes:
|
||||
- rateLimitPolicy:
|
||||
global:
|
||||
descriptors:
|
||||
- entries:
|
||||
- requestHeader:
|
||||
descriptorKey: sample-key
|
||||
headerName: sample-header
|
||||
21
controller/testdata/minimal-image-replicas-deployment.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: client
|
||||
name: client
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: client
|
||||
strategy: {}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: client
|
||||
spec:
|
||||
containers:
|
||||
- image: alpine:3
|
||||
name: alpine
|
||||
resources: {}
|
||||
35
controller/testdata/target-deployment-env-vars.yaml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui'
|
||||
iksm-version: '1.0'
|
||||
name: kustomize-guestbook-ui
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: guestbook-ui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: guestbook-ui
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: SOME_OTHER_ENV_VAR
|
||||
value: some_other_value
|
||||
- name: YET_ANOTHER_ENV_VAR
|
||||
value: yet_another_value
|
||||
- name: SOME_ENV_VAR
|
||||
value: different_value!
|
||||
image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1'
|
||||
name: guestbook-ui
|
||||
ports:
|
||||
- containerPort: 80
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 100Mi
|
||||
23
controller/testdata/target-httpproxy.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
apiVersion: projectcontour.io/v1
|
||||
kind: HTTPProxy
|
||||
metadata:
|
||||
name: my-http-proxy
|
||||
namespace: default
|
||||
spec:
|
||||
routes:
|
||||
- rateLimitPolicy:
|
||||
global:
|
||||
descriptors:
|
||||
- entries:
|
||||
- requestHeaderValueMatch:
|
||||
headers:
|
||||
- contains: sample-key
|
||||
name: sample-header
|
||||
value: third
|
||||
- requestHeader:
|
||||
descriptorKey: sample-key
|
||||
headerName: sample-header
|
||||
- entries:
|
||||
- requestHeader:
|
||||
descriptorKey: sample-key
|
||||
headerName: sample-header
|
||||
42
docs/faq.md
@@ -259,3 +259,45 @@ The most common instance of this error is with `env:` fields for `containers`.
|
||||
|
||||
!!! note "Dynamic applications"
|
||||
It's possible that your application is being generated by a tool, in which case the duplication might not be evident within the scope of a single file. If you have trouble debugging this problem, consider filing a ticket with the owner of the generator tool asking them to improve its validation and error reporting.
|
||||
|
||||
## How to rotate Redis secret?
|
||||
* Delete the `argocd-redis` secret in the namespace where Argo CD is installed.
|
||||
```bash
|
||||
kubectl delete secret argocd-redis -n <argocd namespace>
|
||||
```
|
||||
* If you are running Redis in HA mode, restart Redis in HA.
|
||||
```bash
|
||||
kubectl rollout restart deployment argocd-redis-ha-haproxy
|
||||
kubectl rollout restart statefulset argocd-redis-ha-server
|
||||
```
|
||||
* If you are running Redis in non-HA mode, restart Redis.
|
||||
```bash
|
||||
kubectl rollout restart deployment argocd-redis
|
||||
```
|
||||
* Restart other components.
|
||||
```bash
|
||||
kubectl rollout restart deployment argocd-server argocd-repo-server
|
||||
kubectl rollout restart statefulset argocd-application-controller
|
||||
```
|
||||
|
||||
## How to turn off Redis auth if users really want to?
|
||||
|
||||
The default Argo CD installation is now configured to automatically enable Redis authentication.
|
||||
If for some reason authenticated Redis does not work for you and you want to use non-authenticated Redis, here are the steps:
|
||||
|
||||
* You need to have your own Redis installation.
|
||||
* Configure Argo CD to use your own Redis instance (a brief sketch follows this list). See this [doc](https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cmd-params-cm-yaml/) for the Argo CD configuration.
|
||||
* If you already installed Redis shipped with Argo CD, you also need to clean up the existing components:
|
||||
* When HA Redis is used:
|
||||
* kubectl delete deployment argocd-redis-ha-haproxy
|
||||
* kubectl delete statefulset argocd-redis-ha-server
|
||||
* When non-HA Redis is used:
|
||||
* kubectl delete deployment argocd-redis
|
||||
* Remove environment variable `REDIS_PASSWORD` from the following manifests
|
||||
* Deployment: argocd-repo-server
|
||||
* Deployment: argocd-server
|
||||
* StatefulSet: argocd-application-controller
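As an illustration only, pointing Argo CD at an external, non-authenticated Redis could look roughly like the sketch below. The `redis.server` key is the one described in the linked `argocd-cmd-params-cm` documentation, while the hostname, port, and namespace are assumptions:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
  namespace: argocd                      # namespace where Argo CD is installed (assumed)
data:
  # External Redis endpoint; hostname and port are illustrative values.
  redis.server: "my-external-redis.example.com:6379"
```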
|
||||
|
||||
## How do I provide my own Redis credentials?
|
||||
The Redis password is stored in the Kubernetes secret `argocd-redis`, under the key `auth`, in the namespace where Argo CD is installed.
|
||||
You can configure your secret provider to generate the Kubernetes secret accordingly.
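As a minimal sketch (assuming Argo CD is installed in the `argocd` namespace and that a plain Opaque secret is acceptable to your secret provider; the password value is illustrative), the generated secret would have this shape:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: argocd-redis
  namespace: argocd            # namespace where Argo CD is installed (assumed)
type: Opaque
stringData:
  auth: replace-with-your-redis-password   # illustrative value only
```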
|
||||
@@ -38,6 +38,9 @@ Do one of:
|
||||
|
||||
Use `argocd login --core` to [configure](./user-guide/commands/argocd_login.md) CLI access and skip steps 3-5.
|
||||
|
||||
!!! note
|
||||
This default installation of Redis uses password authentication. The Redis password is stored in the Kubernetes secret `argocd-redis` under the key `auth` in the namespace where Argo CD is installed.
|
||||
|
||||
## 2. Download Argo CD CLI
|
||||
|
||||
Download the latest Argo CD version from [https://github.com/argoproj/argo-cd/releases/latest](https://github.com/argoproj/argo-cd/releases/latest). More detailed installation instructions can be found via the [CLI installation documentation](cli_installation.md).
|
||||
|
||||
@@ -71,6 +71,8 @@ We supply a `ClusterRole` and `ClusterRoleBinding` suitable for this purpose in
|
||||
kubectl apply -k examples/k8s-rbac/argocd-server-applications/
|
||||
```
|
||||
|
||||
`argocd-notifications-controller-rbac-clusterrole.yaml` and `argocd-notifications-controller-rbac-clusterrolebinding.yaml` are used to allow the notifications controller to notify apps in all namespaces.
|
||||
|
||||
!!! note
|
||||
At some later point in time, we may make this cluster role part of the default installation manifests.
|
||||
|
||||
|
||||
@@ -33,6 +33,6 @@ spec:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqExpressions:
|
||||
jqPathExpressions:
|
||||
- .spec.source.helm.values
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ These settings allow you to exert control over when, and how, changes are made t
|
||||
|
||||
Here are some of the controller settings that may be modified to alter the ApplicationSet controller's resource-handling behaviour.
|
||||
|
||||
### Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
|
||||
## Dry run: prevent ApplicationSet from creating, modifying, or deleting all Applications
|
||||
|
||||
To prevent the ApplicationSet controller from creating, modifying, or deleting any `Application` resources, you may enable `dry-run` mode. This essentially switches the controller into a "read only" mode, where the controller Reconcile loop will run, but no resources will be modified.
|
||||
|
||||
@@ -14,7 +14,7 @@ To enable dry-run, add `--dryrun true` to the ApplicationSet Deployment's contai
|
||||
|
||||
See 'How to modify ApplicationSet container parameters' below for detailed steps on how to add this parameter to the controller.
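For illustration only, here is a minimal sketch of the relevant container spec; the container name `argocd-applicationset-controller` and the surrounding fields are assumptions, not taken from the install manifests:

```yaml
# Fragment of the ApplicationSet controller Deployment with dry-run enabled.
spec:
  template:
    spec:
      containers:
        - name: argocd-applicationset-controller   # assumed container name
          args:
            - --dryrun
            - "true"
```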
|
||||
|
||||
### Managed Applications modification Policies
|
||||
## Managed Applications modification Policies
|
||||
|
||||
The ApplicationSet controller supports a parameter `--policy`, which is specified on launch (within the controller Deployment container), and which restricts what types of modifications will be made to managed Argo CD `Application` resources.
|
||||
|
||||
@@ -41,7 +41,7 @@ If the controller parameter `--policy` is set, it takes precedence on the field
|
||||
|
||||
This does not prevent deletion of Applications if the ApplicationSet is deleted
|
||||
|
||||
#### Controller parameter
|
||||
### Controller parameter
|
||||
|
||||
To allow the ApplicationSet controller to *create* `Application` resources, but prevent any further modification, such as deletion, or modification of Application fields, add this parameter in the ApplicationSet controller:
|
||||
```
|
||||
@@ -59,7 +59,7 @@ spec:
|
||||
applicationsSync: create-only
|
||||
```
|
||||
|
||||
### Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
|
||||
## Policy - `create-update`: Prevent ApplicationSet controller from deleting Applications
|
||||
|
||||
To allow the ApplicationSet controller to create or modify `Application` resources, but prevent Applications from being deleted, add the following parameter to the ApplicationSet controller `Deployment`:
|
||||
```
|
||||
@@ -79,7 +79,7 @@ spec:
|
||||
applicationsSync: create-update
|
||||
```
|
||||
|
||||
### Ignore certain changes to Applications
|
||||
## Ignore certain changes to Applications
|
||||
|
||||
The ApplicationSet spec includes an `ignoreApplicationDifferences` field, which allows you to specify which fields of
|
||||
the ApplicationSet should be ignored when comparing Applications.
|
||||
@@ -98,11 +98,94 @@ spec:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqExpressions:
|
||||
jqPathExpressions:
|
||||
- .spec.source.helm.values
|
||||
```
|
||||
|
||||
### Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
### Allow temporarily toggling auto-sync
|
||||
|
||||
One of the most common use cases for ignoring differences is to allow temporarily toggling auto-sync for an Application.
|
||||
|
||||
For example, if you have an ApplicationSet that is configured to automatically sync Applications, you may want to temporarily
|
||||
disable auto-sync for a specific Application. You can do this by adding an ignore rule for the `spec.syncPolicy.automated` field.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/syncPolicy
|
||||
```
|
||||
|
||||
### Limitations of `ignoreApplicationDifferences`
|
||||
|
||||
When an ApplicationSet is reconciled, the controller will compare the ApplicationSet spec with the spec of each Application
|
||||
that it manages. If there are any differences, the controller will generate a patch to update the Application to match the
|
||||
ApplicationSet spec.
|
||||
|
||||
The generated patch is a MergePatch. According to the MergePatch documentation, "existing lists will be completely
|
||||
replaced by new lists" when there is a change to the list.
|
||||
|
||||
This limits the effectiveness of `ignoreApplicationDifferences` when the ignored field is in a list. For example, if you
|
||||
have an application with multiple sources, and you want to ignore changes to the `targetRevision` of one of the sources,
|
||||
changes in other fields or in other sources will cause the entire `sources` list to be replaced, and the `targetRevision`
|
||||
field will be reset to the value defined in the ApplicationSet.
|
||||
|
||||
For example, consider this ApplicationSet:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
spec:
|
||||
ignoreApplicationDifferences:
|
||||
- jqPathExpressions:
|
||||
- .spec.sources[] | select(.repoURL == "https://git.example.com/org/repo1").targetRevision
|
||||
template:
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: main
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
You can freely change the `targetRevision` of the `repo1` source, and the ApplicationSet controller will not overwrite
|
||||
your change.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: fix/bug-123
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
However, if you change the `targetRevision` of the `repo2` source, the ApplicationSet controller will overwrite the entire
|
||||
`sources` field.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/org/repo1
|
||||
targetRevision: main
|
||||
- repoURL: https://git.example.com/org/repo2
|
||||
targetRevision: main
|
||||
```
|
||||
|
||||
!!! note
|
||||
[Future improvements](https://github.com/argoproj/argo-cd/issues/15975) to the ApplicationSet controller may
|
||||
eliminate this problem. For example, the `ref` field might be made a merge key, allowing the ApplicationSet
|
||||
controller to generate and use a StrategicMergePatch instead of a MergePatch. You could then target a specific
|
||||
source by `ref`, ignore changes to a field in that source, and changes to other sources would not cause the ignored
|
||||
field to be overwritten.
|
||||
|
||||
## Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
|
||||
By default, when an `Application` resource is deleted by the ApplicationSet controller, all of the child resources of the Application will be deleted as well (such as all of the Application's `Deployments`, `Services`, etc.).
|
||||
|
||||
@@ -119,7 +202,7 @@ spec:
|
||||
More information on the specific behaviour of `preserveResourcesOnDeletion`, and deletion in ApplicationSet controller and Argo CD in general, can be found on the [Application Deletion](Application-Deletion.md) page.
|
||||
|
||||
|
||||
### Prevent an Application's child resources from being modified
|
||||
## Prevent an Application's child resources from being modified
|
||||
|
||||
Changes made to the ApplicationSet will propagate to the Applications managed by the ApplicationSet, and then Argo CD will propagate the Application changes to the underlying cluster resources (as per [Argo CD Integration](Argo-CD-Integration.md)).
|
||||
|
||||
@@ -185,6 +268,11 @@ kubectl apply -n argocd -f install.yaml
|
||||
|
||||
## Preserving changes made to an Application's annotations and labels
|
||||
|
||||
!!! note
|
||||
The same behavior can be achieved on a per-app basis using the [`ignoreApplicationDifferences`](#ignore-certain-changes-to-applications)
|
||||
feature described above. However, preserved fields may be configured globally, a feature that is not yet available
|
||||
for `ignoreApplicationDifferences`.
|
||||
|
||||
It is common practice in Kubernetes to store state in annotations; operators will often make use of this. To allow for this, it is possible to configure a list of annotations that the ApplicationSet should preserve when reconciling.
|
||||
|
||||
For example, imagine that we have an Application created from an ApplicationSet, but a custom annotation and label has since been added (to the Application) that does not exist in the `ApplicationSet` resource:
|
||||
@@ -220,3 +308,18 @@ By default, the Argo CD notifications and the Argo CD refresh type annotations a
|
||||
!!!note
|
||||
One can also set global preserved fields for the controller by passing a comma-separated list of annotations and labels to
|
||||
`ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS` and `ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS` respectively, as sketched below.
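For illustration (the container name and the annotation/label keys below are assumptions), those variables could be set on the ApplicationSet controller Deployment like this:

```yaml
# Sketch: fragment of the ApplicationSet controller Deployment.
spec:
  template:
    spec:
      containers:
        - name: argocd-applicationset-controller   # assumed container name
          env:
            - name: ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS
              value: "example.com/owner,example.com/ticket"   # illustrative annotation keys
            - name: ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS
              value: "example.com/team"                       # illustrative label key
```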
|
||||
|
||||
## Debugging unexpected changes to Applications
|
||||
|
||||
When the ApplicationSet controller makes a change to an application, it logs the patch at the debug level. To see these
|
||||
logs, set the log level to debug in the `argocd-cmd-params-cm` ConfigMap in the `argocd` namespace:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cmd-params-cm
|
||||
namespace: argocd
|
||||
data:
|
||||
applicationsetcontroller.log.level: debug
|
||||
```
|
||||
|
||||
@@ -174,6 +174,18 @@ It is also possible to use Sprig functions to construct the path variables manua
|
||||
| `{{path.filenameNormalized}}` | `{{.path.filenameNormalized}}` | `{{normalize .path.filename}}` |
|
||||
| `{{path[N]}}` | `-` | `{{index .path.segments N}}` |
|
||||
|
||||
## Available template functions
|
||||
|
||||
The ApplicationSet controller provides the following (a brief usage sketch follows this list):
|
||||
|
||||
- all [sprig](http://masterminds.github.io/sprig/) Go template functions except `env`, `expandenv` and `getHostByName`
|
||||
- `normalize`: sanitizes the input so that it complies with the following rules:
|
||||
1. contains no more than 253 characters
|
||||
2. contains only lowercase alphanumeric characters, '-' or '.'
|
||||
3. starts and ends with an alphanumeric character
|
||||
- `toYaml` / `fromYaml` / `fromYamlArray` Helm-like functions
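As a brief sketch of how these functions are used (the generator, element value, repository URL, and paths below are purely illustrative), `normalize` can be applied inside a Go-templated ApplicationSet like this:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook                         # illustrative name
spec:
  goTemplate: true
  generators:
    - list:
        elements:
          - cluster: Staging_Cluster      # illustrative value
  template:
    metadata:
      # normalize sanitizes the value so it can be used as a resource name
      name: '{{ normalize .cluster }}-guestbook'
    spec:
      project: default
      source:
        repoURL: https://github.com/argoproj/argocd-example-apps.git   # illustrative repo
        targetRevision: HEAD
        path: guestbook
      destination:
        server: https://kubernetes.default.svc
        namespace: '{{ normalize .cluster }}'
```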
|
||||
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Go template usage
|
||||
|
||||
@@ -72,6 +72,9 @@ data:
|
||||
server.rootpath: ""
|
||||
# Directory path that contains additional static assets
|
||||
server.staticassets: "/shared/app"
|
||||
# Semicolon-separated list of content types allowed on non-GET requests. Set an empty string to allow all. Be aware
|
||||
# that allowing content types besides application/json may make your API more vulnerable to CSRF attacks.
|
||||
server.api.content.types: "application/json"
|
||||
|
||||
# Set the logging format. One of: text|json (default "text")
|
||||
server.log.format: "text"
|
||||
|
||||
@@ -44,6 +44,7 @@ spec:
|
||||
args: [-c, 'echo "Initializing..."']
|
||||
# The generate command runs in the Application source directory each time manifests are generated. Standard output
|
||||
# must be ONLY valid Kubernetes Objects in either YAML or JSON. A non-zero exit code will fail manifest generation.
|
||||
# To write log messages from the command, write them to stderr; they will always be displayed.
|
||||
# Error output will be sent to the UI, so avoid printing sensitive information (such as secrets).
|
||||
generate:
|
||||
command: [sh, -c]
|
||||
@@ -333,6 +334,7 @@ If you are actively developing a sidecar-installed CMP, keep a few things in min
|
||||
3. CMP errors are cached by the repo-server in Redis. Restarting the repo-server Pod will not clear the cache. Always
|
||||
do a "Hard Refresh" when actively developing a CMP so you have the latest output.
|
||||
4. Verify your sidecar has started properly by viewing the Pod and seeing that two containers are running `kubectl get pod -l app.kubernetes.io/component=repo-server -n argocd`
|
||||
5. Write log messages to stderr and set the `--loglevel=info` flag in the sidecar. This will print everything written to stderr, even on successful command execution.
|
||||
|
||||
|
||||
### Other Common Errors
|
||||
|
||||
@@ -266,7 +266,7 @@ metadata:
|
||||
argocd.argoproj.io/secret-type: repository
|
||||
stringData:
|
||||
type: git
|
||||
repo: https://source.developers.google.com/p/my-google-project/r/my-repo
|
||||
url: https://source.developers.google.com/p/my-google-project/r/my-repo
|
||||
gcpServiceAccountKey: |
|
||||
{
|
||||
"type": "service_account",
|
||||
|
||||
@@ -17,6 +17,8 @@ which does not require a restart of the application controller pods.
|
||||
|
||||
## Enabling Dynamic Distribution of Clusters
|
||||
|
||||
This feature is disabled by default while it is in alpha. To enable it, you must set the environment variable `ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION` to true when running the Application Controller.
|
||||
|
||||
In order to utilize the feature, the manifests `manifests/ha/base/controller-deployment/` can be applied as a Kustomize
|
||||
overlay. This overlay sets the StatefulSet replicas to `0` and deploys the application controller as a Deployment. The
|
||||
dynamic distribution code automatically kicks in when the controller is deployed as a Deployment.
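As a sketch only (the container name `argocd-application-controller` is assumed here and may differ in your installation), the environment variable could be set on the controller Deployment produced by that overlay:

```yaml
# Fragment of the application controller Deployment with the alpha feature enabled.
spec:
  template:
    spec:
      containers:
        - name: argocd-application-controller   # assumed container name
          env:
            - name: ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION
              value: "true"
```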
|
||||
|
||||
@@ -159,6 +159,7 @@ data:
|
||||
|
||||
g, your-github-org:your-team, role:org-admin
|
||||
```
|
||||
|
||||
----
|
||||
|
||||
Another `policy.csv` example might look as follows:
|
||||
|
||||
@@ -15,58 +15,60 @@ argocd-application-controller [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--app-hard-resync int Time period in seconds for application hard resync.
|
||||
--app-resync int Time period in seconds for application resync. (default 180)
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--gloglevel int Set the glog logging level
|
||||
-h, --help help for argocd-application-controller
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20)
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--metrics-application-labels strings List of Application labels that will be added to the argocd_application_labels metric
|
||||
--metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
|
||||
--metrics-port int Start metrics server on given port (default 8082)
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--operation-processors int Number of application operation processors (default 10)
|
||||
--otlp-address string OpenTelemetry collector address to send traces to
|
||||
--otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
|
||||
--password string Password for basic authentication to the API server
|
||||
--persist-resource-health Enables storing the managed resources health in the Application CRD (default true)
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
--redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
|
||||
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
|
||||
--redis-use-tls Use TLS when connecting to Redis.
|
||||
--redisdb int Redis database.
|
||||
--repo-server string Repo server address. (default "argocd-repo-server:8081")
|
||||
--repo-server-plaintext Disable TLS on connections to repo server
|
||||
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
|
||||
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--sharding-method string Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy")
|
||||
--status-processors int Number of application status processors (default 20)
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
--app-hard-resync int Time period in seconds for application hard resync.
|
||||
--app-resync int Time period in seconds for application resync. (default 180)
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces that applications are allowed to be reconciled from
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--dynamic-cluster-distribution-enabled Enables dynamic cluster distribution.
|
||||
--gloglevel int Set the glog logging level
|
||||
-h, --help help for argocd-application-controller
|
||||
--ignore-normalizer-jq-execution-timeout-seconds duration Set ignore normalizer JQ execution timeout
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--kubectl-parallelism-limit int Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit. (default 20)
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--metrics-application-labels strings List of Application labels that will be added to the argocd_application_labels metric
|
||||
--metrics-cache-expiration duration Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
|
||||
--metrics-port int Start metrics server on given port (default 8082)
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--operation-processors int Number of application operation processors (default 10)
|
||||
--otlp-address string OpenTelemetry collector address to send traces to
|
||||
--otlp-attrs strings List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)
|
||||
--password string Password for basic authentication to the API server
|
||||
--persist-resource-health Enables storing the managed resources health in the Application CRD (default true)
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
--redis-client-certificate string Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-client-key string Path to Redis client key (e.g. /etc/certs/redis/client.crt).
|
||||
--redis-compress string Enable compression for data sent to Redis with the required compression algorithm. (possible values: gzip, none) (default "gzip")
|
||||
--redis-insecure-skip-tls-verify Skip Redis server certificate validation.
|
||||
--redis-use-tls Use TLS when connecting to Redis.
|
||||
--redisdb int Redis database.
|
||||
--repo-server string Repo server address. (default "argocd-repo-server:8081")
|
||||
--repo-server-plaintext Disable TLS on connections to repo server
|
||||
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
|
||||
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--sharding-method string Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] (default "legacy")
|
||||
--status-processors int Number of application status processors (default 20)
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
```
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ argocd-repo-server [flags]
|
||||
--disable-helm-manifest-max-extracted-size Disable maximum size of helm manifest archives when extracted
|
||||
--disable-tls Disable TLS on the gRPC endpoint
|
||||
--helm-manifest-max-extracted-size string Maximum size of helm manifest archives when extracted (default "1G")
|
||||
--helm-registry-max-index-size string Maximum size of registry index file (default "1G")
|
||||
-h, --help help for argocd-repo-server
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
|
||||
@@ -16,6 +16,7 @@ argocd-server [flags]
|
||||
|
||||
```
|
||||
--address string Listen on given address (default "0.0.0.0")
|
||||
--api-content-types string Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty. (default "application/json")
|
||||
--app-state-cache-expiration duration Cache expiration for app state (default 1h0m0s)
|
||||
--application-namespaces strings List of additional namespaces where application resources can be managed in
|
||||
--as string Username to impersonate for the operation
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 2.9 | v1.28, v1.27, v1.26, v1.25 |
|
||||
| 2.8 | v1.27, v1.26, v1.25, v1.24 |
|
||||
| 2.7 | v1.26, v1.25, v1.24, v1.23 |
|
||||
| 2.6 | v1.24, v1.23, v1.22 |
|
||||
| 2.5 | v1.24, v1.23, v1.22 |
|
||||
|
||||
|
||||
58
docs/operator-manual/upgrading/2.8-2.9.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# 2.8 to 2.9
|
||||
|
||||
## Upgraded Kustomize Version
|
||||
|
||||
Note that the bundled Kustomize version has been upgraded from 5.1.0 to 5.2.1.
|
||||
|
||||
## Egress NetworkPolicy for `argocd-redis` and `argocd-redis-ha-haproxy`
|
||||
|
||||
Starting with Argo CD 2.9.16, the NetworkPolicies for `argocd-redis` and `argocd-redis-ha-haproxy` dropped their Egress restrictions. This change was made
to allow access to the Kubernetes API in order to create a secret that secures Redis access.
|
||||
|
||||
To retain similar networking restrictions as before 2.9.16, you can add an Egress rule to allow access only to the
|
||||
Kubernetes API and access needed by Redis itself. The Egress rule for Kubernetes access will depend entirely on your
|
||||
Kubernetes setup. The access for Redis itself can be allowed by adding the following to the
|
||||
`argocd-redis-network-policy` NetworkPolicy:
|
||||
|
||||
```diff
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: argocd-redis-network-policy
|
||||
spec:
|
||||
policyTypes:
|
||||
- Ingress
|
||||
+ - Egress
|
||||
+ egress:
|
||||
+ - ports:
|
||||
+ - port: 53
|
||||
+ protocol: UDP
|
||||
+ - port: 53
|
||||
+ protocol: TCP
|
||||
```
|
||||
|
||||
```diff
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: argocd-redis-ha-haproxy
|
||||
spec:
|
||||
policyTypes:
|
||||
- Ingress
|
||||
+ - Egress
|
||||
+ egress:
|
||||
+ - ports:
|
||||
+ - port: 6379
|
||||
+ protocol: TCP
|
||||
+ - port: 26379
|
||||
+ protocol: TCP
|
||||
+ to:
|
||||
+ - podSelector:
|
||||
+ matchLabels:
|
||||
+ app.kubernetes.io/name: argocd-redis-ha
|
||||
+ - ports:
|
||||
+ - port: 53
|
||||
+ protocol: UDP
|
||||
+ - port: 53
|
||||
+ protocol: TCP
|
||||
```
|
||||
@@ -37,6 +37,7 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/<v
|
||||
|
||||
<hr/>
|
||||
|
||||
* [v2.8 to v2.9](./2.8-2.9.md)
|
||||
* [v2.7 to v2.8](./2.7-2.8.md)
|
||||
* [v2.6 to v2.7](./2.6-2.7.md)
|
||||
* [v2.5 to v2.6](./2.5-2.6.md)
|
||||
|
||||
@@ -31,6 +31,7 @@ This proposal is experimental, meaning after trying a single bounty, we will rev
|
||||
|
||||
#### Creating a Bounty
|
||||
A bounty is a special proposal created under `docs/proposals/feature-bounties`.
|
||||
|
||||
* A bounty proposal may only be created by an existing Argo maintainer.
|
||||
* The proposal document must be reviewed in regular maintainer meetings, and an invitation for feedback will provide 7 days to comment.
|
||||
* A bounty should be approved with [lazy-consensus](https://community.apache.org/committers/lazyConsensus.html)
|
||||
|
||||
@@ -53,6 +53,7 @@ argocd admin [flags]
|
||||
* [argocd admin initial-password](argocd_admin_initial-password.md) - Prints initial password to log in to Argo CD for the first time
|
||||
* [argocd admin notifications](argocd_admin_notifications.md) - Set of CLI commands that helps manage notifications settings
|
||||
* [argocd admin proj](argocd_admin_proj.md) - Manage projects configuration
|
||||
* [argocd admin redis-initial-password](argocd_admin_redis-initial-password.md) - Ensure the Redis password exists, creating a new one if necessary.
|
||||
* [argocd admin repo](argocd_admin_repo.md) - Manage repositories configuration
|
||||
* [argocd admin settings](argocd_admin_settings.md) - Provides set of commands for settings validation and troubleshooting
|
||||
|
||||
|
||||
@@ -11,30 +11,31 @@ argocd admin app get-reconcile-results PATH [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
-h, --help help for get-reconcile-results
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--l string Label selector
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--o string Output format (yaml|json) (default "yaml")
|
||||
--password string Password for basic authentication to the API server
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--refresh If set to true then recalculates apps reconciliation
|
||||
--repo-server string Repo server address.
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
-h, --help help for get-reconcile-results
|
||||
--ignore-normalizer-jq-execution-timeout duration Set ignore normalizer JQ execution timeout (default 1s)
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--l string Label selector
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--o string Output format (yaml|json) (default "yaml")
|
||||
--password string Password for basic authentication to the API server
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--refresh If set to true then recalculates apps reconciliation
|
||||
--repo-server string Repo server address.
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
@@ -29,6 +29,7 @@ argocd admin dashboard [flags]
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--redis-compress string Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none) (default "gzip")
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
@@ -58,7 +59,6 @@ argocd admin dashboard [flags]
|
||||
--redis-haproxy-name string Name of the Redis HA Proxy; set this or the ARGOCD_REDIS_HAPROXY_NAME environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart (default "argocd-redis-ha-haproxy")
|
||||
--redis-name string Name of the Redis deployment; set this or the ARGOCD_REDIS_NAME environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart (default "argocd-redis")
|
||||
--repo-server-name string Name of the Argo CD Repo server; set this or the ARGOCD_REPO_SERVER_NAME environment variable when the server's name label differs from the default, for example when installing via the Helm chart (default "argocd-repo-server")
|
||||
--server string Argo CD server address
|
||||
--server-crt string Server certificate file
|
||||
--server-name string Name of the Argo CD API server; set this or the ARGOCD_SERVER_NAME environment variable when the server's name label differs from the default, for example when installing via the Helm chart (default "argocd-server")
|
||||
```
|
||||
|
||||
@@ -0,0 +1,67 @@
|
||||
# `argocd admin redis-initial-password` Command Reference
|
||||
|
||||
## argocd admin redis-initial-password
|
||||
|
||||
Ensure the Redis password exists, creating a new one if necessary.
|
||||
|
||||
```
|
||||
argocd admin redis-initial-password [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--as string Username to impersonate for the operation
|
||||
--as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
|
||||
--as-uid string UID to impersonate for the operation
|
||||
--certificate-authority string Path to a cert file for the certificate authority
|
||||
--client-certificate string Path to a client certificate file for TLS
|
||||
--client-key string Path to a client key file for TLS
|
||||
--cluster string The name of the kubeconfig cluster to use
|
||||
--context string The name of the kubeconfig context to use
|
||||
--disable-compression If true, opt-out of response compression for all requests to the server
|
||||
-h, --help help for redis-initial-password
|
||||
--insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
--password string Password for basic authentication to the API server
|
||||
--proxy-url string If provided, this URL will be used to connect via proxy
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
--tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
|
||||
--token string Bearer token for authentication to the API server
|
||||
--user string The name of the kubeconfig user to use
|
||||
--username string Username for basic authentication to the API server
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--auth-token string Authentication token
|
||||
--client-crt string Client certificate file
|
||||
--client-crt-key string Client certificate key file
|
||||
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")
|
||||
--controller-name string Name of the Argo CD Application controller; set this or the ARGOCD_APPLICATION_CONTROLLER_NAME environment variable when the controller's name label differs from the default, for example when installing via the Helm chart (default "argocd-application-controller")
|
||||
--core If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server
|
||||
--grpc-web Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.
|
||||
--grpc-web-root-path string Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.
|
||||
-H, --header strings Sets additional header to all requests made by Argo CD CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers)
|
||||
--http-retry-max int Maximum number of retries to establish http connection to Argo CD server
|
||||
--insecure Skip server certificate and domain verification
|
||||
--kube-context string Directs the command to the given kube-context
|
||||
--logformat string Set the logging format. One of: text|json (default "text")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--plaintext Disable TLS
|
||||
--port-forward Connect to a random argocd-server port using port forwarding
|
||||
--port-forward-namespace string Namespace name which should be used for port forwarding
|
||||
--redis-haproxy-name string Name of the Redis HA Proxy; set this or the ARGOCD_REDIS_HAPROXY_NAME environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart (default "argocd-redis-ha-haproxy")
|
||||
--redis-name string Name of the Redis deployment; set this or the ARGOCD_REDIS_NAME environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart (default "argocd-redis")
|
||||
--repo-server-name string Name of the Argo CD Repo server; set this or the ARGOCD_REPO_SERVER_NAME environment variable when the server's name label differs from the default, for example when installing via the Helm chart (default "argocd-repo-server")
|
||||
--server-crt string Server certificate file
|
||||
--server-name string Name of the Argo CD API server; set this or the ARGOCD_SERVER_NAME environment variable when the server's name label differs from the default, for example when installing via the Helm chart (default "argocd-server")
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [argocd admin](argocd_admin.md) - Contains a set of commands useful for Argo CD administrators and requires direct Kubernetes access
|
||||
|
||||
@@ -22,7 +22,8 @@ argocd admin settings resource-overrides ignore-resource-updates ./deploy.yaml -
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for ignore-resource-updates
|
||||
-h, --help help for ignore-resource-updates
|
||||
--ignore-normalizer-jq-execution-timeout duration Set ignore normalizer JQ execution timeout (default 1s)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
@@ -17,15 +17,16 @@ argocd app diff APPNAME [flags]

### Options

```
      --exit-code                                          Return non-zero exit code when there is a diff (default true)
      --hard-refresh                                       Refresh application data as well as target manifests cache
  -h, --help                                               help for diff
+     --ignore-normalizer-jq-execution-timeout duration    Set ignore normalizer JQ execution timeout (default 1s)
      --local string                                       Compare live app to a local manifests
      --local-include stringArray                          Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path. (default [*.yaml,*.yml,*.json])
      --local-repo-root string                             Path to the repository root. Used together with --local allows setting the repository root (default "/")
      --refresh                                            Refresh application data when retrieving
      --revision string                                    Compare live app to a particular revision
      --server-side-generate                               Used with --local, this will send your manifests to the server for diffing
```

### Options inherited from parent commands
@@ -38,31 +38,33 @@ argocd app sync [APPNAME... | -l selector | --project project-name] [flags]

### Options

```
+ -N, --app-namespace string                               Only sync an application in namespace
      --apply-out-of-sync-only                             Sync only out-of-sync resources
      --assumeYes                                          Assume yes as answer for all user queries or prompts
      --async                                              Do not wait for application to sync before continuing
      --dry-run                                            Preview apply without affecting cluster
      --force                                              Use a force apply
  -h, --help                                               help for sync
+     --ignore-normalizer-jq-execution-timeout duration    Set ignore normalizer JQ execution timeout (default 1s)
      --info stringArray                                   A list of key-value pairs during sync process. These infos will be persisted in app.
      --label stringArray                                  Sync only specific resources with a label. This option may be specified repeatedly.
      --local string                                       Path to a local directory. When this flag is present no git queries will be made
      --local-repo-root string                             Path to the repository root. Used together with --local allows setting the repository root (default "/")
  -o, --output string                                      Output format. One of: json|yaml|wide|tree|tree=detailed (default "wide")
      --preview-changes                                    Preview difference against the target and live state before syncing app and wait for user confirmation
      --project stringArray                                Sync apps that belong to the specified projects. This option may be specified repeatedly.
      --prune                                              Allow deleting unexpected resources
      --replace                                            Use a kubectl create/replace instead apply
      --resource stringArray                               Sync only specific resources as GROUP:KIND:NAME or !GROUP:KIND:NAME. Fields may be blank and '*' can be used. This option may be specified repeatedly
      --retry-backoff-duration duration                    Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h) (default 5s)
      --retry-backoff-factor int                           Factor multiplies the base duration after each failed retry (default 2)
      --retry-backoff-max-duration duration                Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h) (default 3m0s)
      --retry-limit int                                    Max number of allowed sync retries
      --revision string                                    Sync to a specific revision. Preserves parameter overrides
  -l, --selector string                                    Sync apps that match this label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.
      --server-side                                        Use server-side apply while syncing the application
      --strategy string                                    Sync strategy (one of: apply|hook)
      --timeout uint                                       Time out after this many seconds
```

### Options inherited from parent commands
@@ -182,3 +182,16 @@ data:
```

The list of supported Kubernetes types is available in [diffing_known_types.txt](https://raw.githubusercontent.com/argoproj/argo-cd/master/util/argo/normalizers/diffing_known_types.txt)

### JQ Path expression timeout

By default, the evaluation of a JQPathExpression is limited to one second. If you encounter a "JQ patch execution timed out" error message due to a complex JQPathExpression that requires more time to evaluate, you can extend the timeout period by configuring the `ignore.normalizer.jq.timeout` setting within the `argocd-cmd-params-cm` ConfigMap.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
data:
  ignore.normalizer.jq.timeout: "5s"
```
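For one-off CLI operations, the same limit can also be raised per invocation with the `--ignore-normalizer-jq-execution-timeout` flag shown in the command options above; a minimal sketch, assuming an application named `my-app` (the name and the timeout value are illustrative):

```shell
# Hypothetical example: extend the JQ normalizer timeout for a single diff
argocd app diff my-app --ignore-normalizer-jq-execution-timeout 5s
```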
@@ -0,0 +1,19 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: argocd-notifications-controller-cluster-apps
    app.kubernetes.io/part-of: argocd
    app.kubernetes.io/component: notifications-controller
  name: argocd-notifications-controller-cluster-apps
rules:
- apiGroups:
  - "argoproj.io"
  resources:
  - "applications"
  verbs:
  - get
  - list
  - watch
  - update
  - patch
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: argocd-notifications-controller-cluster-apps
    app.kubernetes.io/part-of: argocd
    app.kubernetes.io/component: notifications-controller
  name: argocd-notifications-controller-cluster-apps
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-notifications-controller-cluster-apps
subjects:
- kind: ServiceAccount
  name: argocd-notifications-controller
  namespace: argocd
@@ -3,3 +3,5 @@ kind: Kustomization
resources:
- argocd-server-rbac-clusterrole.yaml
- argocd-server-rbac-clusterrolebinding.yaml
- argocd-notifications-controller-rbac-clusterrole.yaml
- argocd-notifications-controller-rbac-clusterrolebinding.yaml
@@ -1,8 +1,8 @@
#!/bin/sh

-yq e -o=json values.yaml | jq '{
+yq e -o=json values.yaml | jq '[{
  name: "helm-parameters",
  title: "Helm Parameters",
  collectionType: "map",
  map: [leaf_paths as $path | {"key": $path | join("."), "value": getpath($path)|tostring}] | from_entries
-}'
+}]'
109
go.mod
@@ -25,7 +25,8 @@ require (
    github.com/evanphx/json-patch v5.6.0+incompatible
    github.com/fsnotify/fsnotify v1.6.0
    github.com/gfleury/go-bitbucket-v1 v0.0.0-20220301131131-8e7ed04b843e
-   github.com/go-git/go-git/v5 v5.8.1
+   github.com/go-git/go-git/v5 v5.11.0
+   github.com/go-jose/go-jose/v3 v3.0.3
    github.com/go-logr/logr v1.2.4
    github.com/go-openapi/loads v0.21.2
    github.com/go-openapi/runtime v0.26.0
@@ -35,8 +36,8 @@ require (
    github.com/gogits/go-gogs-client v0.0.0-20200905025246-8bb8a50cb355
    github.com/gogo/protobuf v1.3.2
    github.com/golang-jwt/jwt/v4 v4.5.0
-   github.com/golang/protobuf v1.5.3
-   github.com/google/go-cmp v0.5.9
+   github.com/golang/protobuf v1.5.4
+   github.com/google/go-cmp v0.6.0
    github.com/google/go-github/v35 v35.3.0
    github.com/google/go-jsonnet v0.20.0
    github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
@@ -77,23 +78,22 @@ require (
    go.opentelemetry.io/otel v1.16.0
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
    go.opentelemetry.io/otel/sdk v1.16.0
-   golang.org/x/crypto v0.13.0
+   golang.org/x/crypto v0.19.0
    golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
    golang.org/x/oauth2 v0.11.0
    golang.org/x/sync v0.3.0
-   golang.org/x/term v0.12.0
+   golang.org/x/term v0.17.0
    google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc
    google.golang.org/grpc v1.56.2
-   google.golang.org/protobuf v1.31.0
-   gopkg.in/square/go-jose.v2 v2.6.0
+   google.golang.org/protobuf v1.33.0
    gopkg.in/yaml.v2 v2.4.0
    gopkg.in/yaml.v3 v3.0.1
-   k8s.io/api v0.24.2
+   k8s.io/api v0.24.17
    k8s.io/apiextensions-apiserver v0.24.2
-   k8s.io/apimachinery v0.24.2
-   k8s.io/apiserver v0.24.2
-   k8s.io/client-go v0.24.2
-   k8s.io/code-generator v0.24.2
+   k8s.io/apimachinery v0.24.17
+   k8s.io/apiserver v0.24.17
+   k8s.io/client-go v0.24.17
+   k8s.io/code-generator v0.24.17
    k8s.io/klog/v2 v2.70.1
    k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8
    k8s.io/kubectl v0.24.2
@@ -151,9 +151,8 @@ require (
    github.com/Masterminds/goutils v1.1.1 // indirect
    github.com/Microsoft/go-winio v0.6.1 // indirect
    github.com/PagerDuty/go-pagerduty v1.7.0 // indirect
-   github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
+   github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
    github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 // indirect
-   github.com/acomagu/bufpipe v1.0.4 // indirect
    github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
    github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
@@ -175,8 +174,7 @@ require (
    github.com/ghodss/yaml v1.0.0 // indirect
    github.com/go-errors/errors v1.4.2 // indirect
    github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
-   github.com/go-git/go-billy/v5 v5.4.1 // indirect
-   github.com/go-jose/go-jose/v3 v3.0.0 // indirect
+   github.com/go-git/go-billy/v5 v5.5.0 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-openapi/analysis v0.21.4 // indirect
    github.com/go-openapi/errors v0.20.3 // indirect
@@ -234,7 +232,7 @@ require (
    github.com/pjbgf/sha1cd v0.3.0 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
-   github.com/prometheus/client_model v0.3.0 // indirect
+   github.com/prometheus/client_model v0.3.0
    github.com/prometheus/common v0.42.0 // indirect
    github.com/prometheus/procfs v0.10.1 // indirect
    github.com/rivo/uniseg v0.4.4 // indirect
@@ -243,7 +241,7 @@ require (
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/sergi/go-diff v1.1.0 // indirect
    github.com/shopspring/decimal v1.2.0 // indirect
-   github.com/skeema/knownhosts v1.2.0 // indirect
+   github.com/skeema/knownhosts v1.2.2 // indirect
    github.com/slack-go/slack v0.12.2 // indirect
    github.com/spf13/cast v1.5.1 // indirect
    github.com/stretchr/objx v0.5.0 // indirect
@@ -260,12 +258,12 @@ require (
    go.opentelemetry.io/otel/trace v1.16.0 // indirect
    go.opentelemetry.io/proto/otlp v0.19.0 // indirect
    go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect
-   golang.org/x/mod v0.9.0 // indirect
-   golang.org/x/net v0.15.0 // indirect
-   golang.org/x/sys v0.12.0 // indirect
-   golang.org/x/text v0.13.0 // indirect
+   golang.org/x/mod v0.12.0 // indirect
+   golang.org/x/net v0.19.0
+   golang.org/x/sys v0.17.0 // indirect
+   golang.org/x/text v0.14.0 // indirect
    golang.org/x/time v0.3.0 // indirect
-   golang.org/x/tools v0.7.0 // indirect
+   golang.org/x/tools v0.13.0 // indirect
    gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
    gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
    gomodules.xyz/notify v0.1.1 // indirect
@@ -274,12 +272,12 @@ require (
    gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/warnings.v0 v0.1.2 // indirect
-   k8s.io/cli-runtime v0.24.2 // indirect
-   k8s.io/component-base v0.24.2 // indirect
-   k8s.io/component-helpers v0.24.2 // indirect
+   k8s.io/cli-runtime v0.24.17 // indirect
+   k8s.io/component-base v0.24.17 // indirect
+   k8s.io/component-helpers v0.24.17 // indirect
    k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
    k8s.io/kube-aggregator v0.24.2 // indirect
-   k8s.io/kubernetes v1.24.2 // indirect
+   k8s.io/kubernetes v1.24.17 // indirect
    sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect
    sigs.k8s.io/kustomize/api v0.11.5 // indirect
    sigs.k8s.io/kustomize/kyaml v0.13.7 // indirect
@@ -289,9 +287,12 @@ replace (
    // https://github.com/golang/go/issues/33546#issuecomment-519656923
    github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127

-   github.com/golang/protobuf => github.com/golang/protobuf v1.4.2
+   github.com/golang/protobuf => github.com/golang/protobuf v1.5.4
    github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0

+   // Avoid CVE-2023-46402
+   github.com/whilp/git-urls => github.com/chainguard-dev/git-urls v1.0.2
+
    // Avoid CVE-2022-3064
    gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0
@@ -299,29 +300,31 @@ replace (
    gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1

    // https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-505627280
-   k8s.io/api => k8s.io/api v0.24.2
-   k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.2
-   k8s.io/apimachinery => k8s.io/apimachinery v0.24.2
-   k8s.io/apiserver => k8s.io/apiserver v0.24.2
-   k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.2
-   k8s.io/client-go => k8s.io/client-go v0.24.2
-   k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.2
-   k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.2
-   k8s.io/code-generator => k8s.io/code-generator v0.24.2
-   k8s.io/component-base => k8s.io/component-base v0.24.2
-   k8s.io/component-helpers => k8s.io/component-helpers v0.24.2
-   k8s.io/controller-manager => k8s.io/controller-manager v0.24.2
-   k8s.io/cri-api => k8s.io/cri-api v0.24.2
-   k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.2
-   k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.2
-   k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.2
-   k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.2
-   k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.2
-   k8s.io/kubectl => k8s.io/kubectl v0.24.2
-   k8s.io/kubelet => k8s.io/kubelet v0.24.2
-   k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.2
-   k8s.io/metrics => k8s.io/metrics v0.24.2
-   k8s.io/mount-utils => k8s.io/mount-utils v0.24.2
-   k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.2
-   k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.2
+   k8s.io/api => k8s.io/api v0.24.17
+   k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.17
+   k8s.io/apimachinery => k8s.io/apimachinery v0.24.17
+   k8s.io/apiserver => k8s.io/apiserver v0.24.17
+   k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.17
+   k8s.io/client-go => k8s.io/client-go v0.24.17
+   k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.17
+   k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.17
+   k8s.io/code-generator => k8s.io/code-generator v0.24.17
+   k8s.io/component-base => k8s.io/component-base v0.24.17
+   k8s.io/component-helpers => k8s.io/component-helpers v0.24.17
+   k8s.io/controller-manager => k8s.io/controller-manager v0.24.17
+   k8s.io/cri-api => k8s.io/cri-api v0.24.17
+   k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.17
+   k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.17
+   k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.17
+   k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.17
+   k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.17
+   k8s.io/kubectl => k8s.io/kubectl v0.24.17
+   k8s.io/kubelet => k8s.io/kubelet v0.24.17
+   k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.17
+   k8s.io/metrics => k8s.io/metrics v0.24.17
+   k8s.io/mount-utils => k8s.io/mount-utils v0.24.17
+   k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.17
+   k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.17
+   k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.24.17
+   k8s.io/sample-controller => k8s.io/sample-controller v0.24.17
)
@@ -1,6 +1,7 @@
package main

import (
    "bytes"
    "fmt"
    "log"
    "os"

@@ -61,6 +62,11 @@ func updateMkDocsNav(parent string, child string, subchild string, files []strin
    if err != nil {
        return err
    }

    // The marshaller drops custom tags, so re-add this one. Turns out this is much less invasive than trying to handle
    // it at the YAML parser level.
    newmkdocs = bytes.Replace(newmkdocs, []byte("site_url: READTHEDOCS_CANONICAL_URL"), []byte("site_url: !ENV READTHEDOCS_CANONICAL_URL"), 1)

    return os.WriteFile("mkdocs.yml", newmkdocs, 0644)
}
@@ -0,0 +1 @@
55a8e6dce87a1e52c61e0ce7a89bf85b38725ba3e8deb51d4a08ade8a2c70b2d helm-v3.13.2-linux-amd64.tar.gz
@@ -0,0 +1 @@
f5654aaed63a0da72852776e1d3f851b2ea9529cb5696337202703c2e1ed2321 helm-v3.13.2-linux-arm64.tar.gz
@@ -0,0 +1 @@
11d96134cc4ec106c23cd8c163072e9aed6cd73e36a3da120e5876d426203f37 helm-v3.13.2-linux-ppc64le.tar.gz
@@ -0,0 +1 @@
3ffc5b4a041e5306dc00905ebe5dfea449e34ada268a713d34c69709afd6a9a2 helm-v3.13.2-linux-s390x.tar.gz
@@ -0,0 +1 @@
b7aba749da75d33e6fea49a5098747d379abc45583ff5cd16e2356127a396549 kustomize_5.2.1_darwin_amd64.tar.gz
@@ -0,0 +1 @@
f6a5f3cffd45bac585a0c80b5ed855c2b72d932a1d6e8e7c87aae3be4eba5750 kustomize_5.2.1_darwin_arm64.tar.gz
@@ -0,0 +1 @@
88346543206b889f9287c0b92c70708040ecd5aad54dd33019c4d6579cd24de8 kustomize_5.2.1_linux_amd64.tar.gz
@@ -0,0 +1 @@
5566f7badece5a72d42075d8dffa6296a228966dd6ac2390de7afbb9675c3aaa kustomize_5.2.1_linux_arm64.tar.gz
@@ -0,0 +1 @@
82d732cf624b6fa67dfabe751e9a1510e2d08605996b1b130b4c0f5b835b130e kustomize_5.2.1_linux_ppc64le.tar.gz
@@ -0,0 +1 @@
d94cb97a2776b4685ab41233dfd5f0b426f399d2fce87d2b69e1ce4907f3aad2 kustomize_5.2.1_linux_s390x.tar.gz
@@ -11,8 +11,8 @@
# Use ./hack/installers/checksums/add-helm-checksums.sh and
# add-kustomize-checksums.sh to help download checksums.
###############################################################################
-helm3_version=3.12.1
+helm3_version=3.13.2
kubectl_version=1.17.8
kubectx_version=0.6.3
-kustomize5_version=5.1.0
+kustomize5_version=5.2.1
protoc_version=3.17.3
@@ -22,6 +22,11 @@ spec:
        env:
        - name: ARGOCD_CONTROLLER_REPLICAS
          value: "1"
        - name: REDIS_PASSWORD
          valueFrom:
            secretKeyRef:
              key: auth
              name: argocd-redis
        - name: ARGOCD_RECONCILIATION_TIMEOUT
          valueFrom:
            configMapKeyRef:
@@ -215,4 +220,4 @@ spec:
          - key: tls.key
            path: tls.key
          - key: ca.crt
            path: ca.crt
@@ -36,3 +36,11 @@ rules:
  verbs:
  - create
  - list
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
@@ -21,6 +21,11 @@ spec:
      - args:
        - /usr/local/bin/argocd-application-controller
        env:
        - name: REDIS_PASSWORD
          valueFrom:
            secretKeyRef:
              key: auth
              name: argocd-redis
        - name: ARGOCD_CONTROLLER_REPLICAS
          value: "1"
        - name: ARGOCD_RECONCILIATION_TIMEOUT
@@ -155,6 +160,12 @@ spec:
              name: argocd-cmd-params-cm
              key: controller.kubectl.parallelism.limit
              optional: true
        - name: ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT
          valueFrom:
            configMapKeyRef:
              name: argocd-cmd-params-cm
              key: controller.ignore.normalizer.jq.timeout
              optional: true
        image: quay.io/argoproj/argocd:latest
        imagePullPolicy: Always
        name: argocd-application-controller
@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
  newName: quay.io/argoproj/argocd
-  newTag: latest
+  newTag: v2.9.16
resources:
- ./application-controller
- ./dex
@@ -48,6 +48,12 @@ spec:
              key: notificationscontroller.log.level
              name: argocd-cmd-params-cm
              optional: true
        - name: ARGOCD_APPLICATION_NAMESPACES
          valueFrom:
            configMapKeyRef:
              key: application.namespaces
              name: argocd-cmd-params-cm
              optional: true
        workingDir: /app
        livenessProbe:
          tcpSocket:
Some files were not shown because too many files have changed in this diff.