mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-02-26 20:48:46 +01:00
Compare commits
13 Commits
v3.2.2
...
renovate/g
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f04ca4a967 | ||
|
|
9ef837c326 | ||
|
|
c11d35a20f | ||
|
|
a7a07e2cd8 | ||
|
|
9faa6098ed | ||
|
|
0fb6c51f9d | ||
|
|
dbef22c843 | ||
|
|
47142b89f4 | ||
|
|
98a22612dd | ||
|
|
6cce4b29b9 | ||
|
|
9087ad7282 | ||
|
|
c377101491 | ||
|
|
1d13ebc372 |
18
.github/workflows/ci-build.yaml
vendored
18
.github/workflows/ci-build.yaml
vendored
@@ -14,7 +14,7 @@ on:
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.0'
|
||||
GOLANG_VERSION: '1.25.1'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -308,7 +308,7 @@ jobs:
|
||||
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
|
||||
with:
|
||||
# renovate: datasource=node-version packageName=node versioning=node
|
||||
node-version: '22.9.0'
|
||||
node-version: '22.19.0'
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
@@ -407,7 +407,7 @@ jobs:
|
||||
test-e2e:
|
||||
name: Run end-to-end tests
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: oracle-vm-16cpu-64gb-x86-64
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -426,7 +426,7 @@ jobs:
|
||||
- build-go
|
||||
- changes
|
||||
env:
|
||||
GOPATH: /home/ubuntu/go
|
||||
GOPATH: /home/runner/go
|
||||
ARGOCD_FAKE_IN_CLUSTER: 'true'
|
||||
ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
|
||||
ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
|
||||
@@ -462,9 +462,9 @@ jobs:
|
||||
set -x
|
||||
curl -sfL https://get.k3s.io | sh -
|
||||
sudo chmod -R a+rw /etc/rancher/k3s
|
||||
sudo mkdir -p $HOME/.kube && sudo chown -R ubuntu $HOME/.kube
|
||||
sudo mkdir -p $HOME/.kube && sudo chown -R runner $HOME/.kube
|
||||
sudo k3s kubectl config view --raw > $HOME/.kube/config
|
||||
sudo chown ubuntu $HOME/.kube/config
|
||||
sudo chown runner $HOME/.kube/config
|
||||
sudo chmod go-r $HOME/.kube/config
|
||||
kubectl version
|
||||
- name: Restore go build cache
|
||||
@@ -474,7 +474,7 @@ jobs:
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
- name: Add ~/go/bin to PATH
|
||||
run: |
|
||||
echo "/home/ubuntu/go/bin" >> $GITHUB_PATH
|
||||
echo "/home/runner/go/bin" >> $GITHUB_PATH
|
||||
- name: Add /usr/local/bin to PATH
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
@@ -496,11 +496,11 @@ jobs:
|
||||
run: |
|
||||
docker pull ghcr.io/dexidp/dex:v2.43.0
|
||||
docker pull argoproj/argo-cd-ci-builder:v1.0.0
|
||||
docker pull redis:8.2.2-alpine
|
||||
docker pull redis:8.2.1-alpine
|
||||
- name: Create target directory for binaries in the build-process
|
||||
run: |
|
||||
mkdir -p dist
|
||||
chown ubuntu dist
|
||||
chown runner dist
|
||||
- name: Run E2E server and wait for it being available
|
||||
timeout-minutes: 30
|
||||
run: |
|
||||
|
||||
4
.github/workflows/image.yaml
vendored
4
.github/workflows/image.yaml
vendored
@@ -53,7 +53,7 @@ jobs:
|
||||
with:
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.0
|
||||
go-version: 1.25.1
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: false
|
||||
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.0
|
||||
go-version: 1.25.1
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
70
.github/workflows/release.yaml
vendored
70
.github/workflows/release.yaml
vendored
@@ -11,7 +11,7 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.0' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.25.1' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
@@ -25,49 +25,13 @@ jobs:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.0
|
||||
go-version: 1.25.1
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
|
||||
|
||||
setup-variables:
|
||||
name: Setup Release Variables
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
outputs:
|
||||
is_pre_release: ${{ steps.var.outputs.is_pre_release }}
|
||||
is_latest_release: ${{ steps.var.outputs.is_latest_release }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Setup variables
|
||||
id: var
|
||||
run: |
|
||||
set -xue
|
||||
# Fetch all tag information
|
||||
git fetch --prune --tags --force
|
||||
|
||||
LATEST_RELEASE_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | grep -v '-' | tail -n1)
|
||||
|
||||
PRE_RELEASE=false
|
||||
# Check if latest tag is a pre-release
|
||||
if echo ${{ github.ref_name }} | grep -E -- '-rc[0-9]+$';then
|
||||
PRE_RELEASE=true
|
||||
fi
|
||||
|
||||
IS_LATEST=false
|
||||
# Ensure latest release tag matches github.ref_name
|
||||
if [[ $LATEST_RELEASE_TAG == ${{ github.ref_name }} ]];then
|
||||
IS_LATEST=true
|
||||
fi
|
||||
echo "is_pre_release=$PRE_RELEASE" >> $GITHUB_OUTPUT
|
||||
echo "is_latest_release=$IS_LATEST" >> $GITHUB_OUTPUT
|
||||
|
||||
argocd-image-provenance:
|
||||
needs: [argocd-image]
|
||||
permissions:
|
||||
@@ -86,17 +50,15 @@ jobs:
|
||||
|
||||
goreleaser:
|
||||
needs:
|
||||
- setup-variables
|
||||
- argocd-image
|
||||
- argocd-image-provenance
|
||||
permissions:
|
||||
contents: write # used for uploading assets
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
|
||||
outputs:
|
||||
hashes: ${{ steps.hash.outputs.hashes }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
@@ -180,7 +142,7 @@ jobs:
|
||||
permissions:
|
||||
contents: write # Needed for release uploads
|
||||
outputs:
|
||||
hashes: ${{ steps.sbom-hash.outputs.hashes }}
|
||||
hashes: ${{ steps.sbom-hash.outputs.hashes}}
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
@@ -259,7 +221,6 @@ jobs:
|
||||
|
||||
post-release:
|
||||
needs:
|
||||
- setup-variables
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
- generate-sbom
|
||||
@@ -268,8 +229,6 @@ jobs:
|
||||
pull-requests: write # Needed to create PR for VERSION update.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
@@ -283,6 +242,27 @@ jobs:
|
||||
git config --global user.email 'ci@argoproj.com'
|
||||
git config --global user.name 'CI'
|
||||
|
||||
- name: Check if tag is the latest version and not a pre-release
|
||||
run: |
|
||||
set -xue
|
||||
# Fetch all tag information
|
||||
git fetch --prune --tags --force
|
||||
|
||||
LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1)
|
||||
|
||||
PRE_RELEASE=false
|
||||
# Check if latest tag is a pre-release
|
||||
if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then
|
||||
PRE_RELEASE=true
|
||||
fi
|
||||
|
||||
# Ensure latest tag matches github.ref_name & not a pre-release
|
||||
if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then
|
||||
echo "TAG_STABLE=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "TAG_STABLE=false" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Update stable tag to latest version
|
||||
run: |
|
||||
git tag -f stable ${{ github.ref_name }}
|
||||
|
||||
1
.github/workflows/renovate.yaml
vendored
1
.github/workflows/renovate.yaml
vendored
@@ -10,6 +10,7 @@ permissions:
|
||||
jobs:
|
||||
renovate:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
steps:
|
||||
- name: Get token
|
||||
id: get_token
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -20,7 +20,6 @@ node_modules/
|
||||
.kube/
|
||||
./test/cmp/*.sock
|
||||
.envrc.remote
|
||||
.mirrord/
|
||||
.*.swp
|
||||
rerunreport.txt
|
||||
|
||||
|
||||
@@ -49,14 +49,13 @@ archives:
|
||||
- argocd-cli
|
||||
name_template: |-
|
||||
{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}
|
||||
formats: [binary]
|
||||
formats: [ binary ]
|
||||
|
||||
checksum:
|
||||
name_template: 'cli_checksums.txt'
|
||||
algorithm: sha256
|
||||
|
||||
release:
|
||||
make_latest: '{{ .Env.GORELEASER_MAKE_LATEST }}'
|
||||
prerelease: auto
|
||||
draft: false
|
||||
header: |
|
||||
|
||||
@@ -24,6 +24,7 @@ packages:
|
||||
Renderer: {}
|
||||
github.com/argoproj/argo-cd/v3/commitserver/apiclient:
|
||||
interfaces:
|
||||
Clientset: {}
|
||||
CommitServiceClient: {}
|
||||
github.com/argoproj/argo-cd/v3/commitserver/commit:
|
||||
interfaces:
|
||||
@@ -34,7 +35,6 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/controller/hydrator:
|
||||
interfaces:
|
||||
Dependencies: {}
|
||||
RepoGetter: {}
|
||||
github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster:
|
||||
interfaces:
|
||||
ClusterServiceServer: {}
|
||||
|
||||
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS builder
|
||||
FROM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -85,7 +85,7 @@ WORKDIR /home/argocd
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d AS argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.11.1@sha256:9a25b5a6f9a90218b73a62205f111e71de5e4289aee952b4dd7e86f7498f2544 AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6
|
||||
FROM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -46,6 +47,8 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/controllers/template"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/metrics"
|
||||
@@ -75,7 +78,6 @@ const (
|
||||
var defaultPreservedAnnotations = []string{
|
||||
NotifiedAnnotationKey,
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
argov1alpha1.AnnotationKeyHydrate,
|
||||
}
|
||||
|
||||
type deleteInOrder struct {
|
||||
@@ -101,7 +103,6 @@ type ApplicationSetReconciler struct {
|
||||
GlobalPreservedAnnotations []string
|
||||
GlobalPreservedLabels []string
|
||||
Metrics *metrics.ApplicationsetMetrics
|
||||
MaxResourcesStatusCount int
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -228,8 +229,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get update resources status for application set: %w", err)
|
||||
}
|
||||
|
||||
// appMap is a name->app collection of Applications in this ApplicationSet.
|
||||
appMap := map[string]argov1alpha1.Application{}
|
||||
// appSyncMap tracks which apps will be synced during this reconciliation.
|
||||
appSyncMap := map[string]bool{}
|
||||
|
||||
@@ -243,33 +242,11 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
|
||||
}
|
||||
} else if isRollingSyncStrategy(&applicationSetInfo) {
|
||||
// The appset uses progressive sync with `RollingSync` strategy
|
||||
for _, app := range currentApplications {
|
||||
appMap[app.Name] = app
|
||||
}
|
||||
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications, appMap)
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Progressive Sync is disabled, clear any existing applicationStatus to prevent stale data
|
||||
if len(applicationSetInfo.Status.ApplicationStatus) > 0 {
|
||||
logCtx.Infof("Progressive Sync disabled, removing %v AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
|
||||
|
||||
err := r.setAppSetApplicationStatus(ctx, logCtx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to clear AppSet application statuses when Progressive Sync is disabled for %v: %w", applicationSetInfo.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range generatedApplications {
|
||||
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
|
||||
validApps = append(validApps, generatedApplications[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(validateErrors) > 0 {
|
||||
@@ -299,13 +276,25 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
)
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range generatedApplications {
|
||||
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
|
||||
validApps = append(validApps, generatedApplications[i])
|
||||
}
|
||||
}
|
||||
|
||||
if r.EnableProgressiveSyncs {
|
||||
// trigger appropriate application syncs if RollingSync strategy is enabled
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(&applicationSetInfo) {
|
||||
validApps = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
|
||||
validApps = r.syncDesiredApplications(logCtx, &applicationSetInfo, appSyncMap, validApps)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort apps by name so they are updated/created in the same order, and condition errors are the same
|
||||
sort.Slice(validApps, func(i, j int) bool {
|
||||
return validApps[i].Name < validApps[j].Name
|
||||
})
|
||||
|
||||
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
|
||||
err = r.createOrUpdateInCluster(ctx, logCtx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
@@ -337,6 +326,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
|
||||
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
|
||||
// Delete the generatedApplications instead of the validApps because we want to be able to delete applications in error/invalid state
|
||||
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, generatedApplications)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
@@ -943,7 +933,7 @@ func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx conte
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application) (map[string]bool, error) {
|
||||
appDependencyList, appStepMap := r.buildAppDependencyList(logCtx, appset, desiredApplications)
|
||||
|
||||
_, err := r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
|
||||
@@ -952,21 +942,21 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
|
||||
}
|
||||
|
||||
logCtx.Infof("ApplicationSet %v step list:", appset.Name)
|
||||
for i, step := range appDependencyList {
|
||||
logCtx.Infof("step %v: %+v", i+1, step)
|
||||
for stepIndex, applicationNames := range appDependencyList {
|
||||
logCtx.Infof("step %v: %+v", stepIndex+1, applicationNames)
|
||||
}
|
||||
|
||||
appSyncMap := r.buildAppSyncMap(appset, appDependencyList, appMap)
|
||||
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
|
||||
appsToSync := r.getAppsToSync(appset, appDependencyList, applications)
|
||||
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appsToSync)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap)
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appsToSync, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
|
||||
}
|
||||
|
||||
_ = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
|
||||
return appSyncMap, nil
|
||||
return appsToSync, nil
|
||||
}
|
||||
|
||||
// this list tracks which Applications belong to each RollingUpdate step
|
||||
@@ -1040,55 +1030,53 @@ func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov
|
||||
return valueMatched
|
||||
}
|
||||
|
||||
// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
|
||||
func (r *ApplicationSetReconciler) buildAppSyncMap(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) map[string]bool {
|
||||
// getAppsToSync returns a Map of Applications that should be synced in this progressive sync wave
|
||||
func (r *ApplicationSetReconciler) getAppsToSync(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, currentApplications []argov1alpha1.Application) map[string]bool {
|
||||
appSyncMap := map[string]bool{}
|
||||
syncEnabled := true
|
||||
currentAppsMap := map[string]bool{}
|
||||
|
||||
// healthy stages and the first non-healthy stage should have sync enabled
|
||||
// every stage after should have sync disabled
|
||||
for _, app := range currentApplications {
|
||||
currentAppsMap[app.Name] = true
|
||||
}
|
||||
|
||||
for i := range appDependencyList {
|
||||
for stepIndex := range appDependencyList {
|
||||
// set the syncEnabled boolean for every Application in the current step
|
||||
for _, appName := range appDependencyList[i] {
|
||||
appSyncMap[appName] = syncEnabled
|
||||
for _, appName := range appDependencyList[stepIndex] {
|
||||
appSyncMap[appName] = true
|
||||
}
|
||||
|
||||
// detect if we need to halt before progressing to the next step
|
||||
for _, appName := range appDependencyList[i] {
|
||||
// evaluate if we need to sync next waves
|
||||
syncNextWave := true
|
||||
for _, appName := range appDependencyList[stepIndex] {
|
||||
// Check if application is created and managed by this AppSet, if it is not created yet, we cannot progress
|
||||
if _, ok := currentAppsMap[appName]; !ok {
|
||||
syncNextWave = false
|
||||
break
|
||||
}
|
||||
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName)
|
||||
if idx == -1 {
|
||||
// no Application status found, likely because the Application is being newly created
|
||||
syncEnabled = false
|
||||
// No Application status found, likely because the Application is being newly created
|
||||
// This mean this wave is not yet completed
|
||||
syncNextWave = false
|
||||
break
|
||||
}
|
||||
|
||||
appStatus := applicationSet.Status.ApplicationStatus[idx]
|
||||
app, ok := appMap[appName]
|
||||
if !ok {
|
||||
// application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
|
||||
syncEnabled = false
|
||||
break
|
||||
}
|
||||
syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
|
||||
if !syncEnabled {
|
||||
if appStatus.Status != argov1alpha1.ProgressiveSyncHealthy {
|
||||
// At least one application in this wave is not yet healthy. We cannot proceed to the next wave
|
||||
syncNextWave = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !syncNextWave {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return appSyncMap
|
||||
}
|
||||
|
||||
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(appset) {
|
||||
// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
|
||||
return isApplicationHealthy(app) && appStatus.Status == "Healthy"
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func isRollingSyncStrategy(appset *argov1alpha1.ApplicationSet) bool {
|
||||
// It's only RollingSync if the type specifically sets it
|
||||
return appset.Spec.Strategy != nil && appset.Spec.Strategy.Type == "RollingSync" && appset.Spec.Strategy.RollingSync != nil
|
||||
@@ -1099,29 +1087,21 @@ func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.Application
|
||||
return isRollingSyncStrategy(appset) && len(appset.Spec.Strategy.RollingSync.Steps) > 0
|
||||
}
|
||||
|
||||
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
|
||||
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
|
||||
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
|
||||
}
|
||||
|
||||
func isApplicationHealthy(app argov1alpha1.Application) bool {
|
||||
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
|
||||
|
||||
if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") {
|
||||
return true
|
||||
func isApplicationWithError(app argov1alpha1.Application) bool {
|
||||
for _, condition := range app.Status.Conditions {
|
||||
if condition.Type == argov1alpha1.ApplicationConditionInvalidSpecError {
|
||||
return true
|
||||
}
|
||||
if condition.Type == argov1alpha1.ApplicationConditionUnknownError {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func statusStrings(app argov1alpha1.Application) (string, string, string) {
|
||||
healthStatusString := string(app.Status.Health.Status)
|
||||
syncStatusString := string(app.Status.Sync.Status)
|
||||
operationPhaseString := ""
|
||||
if app.Status.OperationState != nil {
|
||||
operationPhaseString = string(app.Status.OperationState.Phase)
|
||||
}
|
||||
|
||||
return healthStatusString, syncStatusString, operationPhaseString
|
||||
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
|
||||
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
|
||||
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
|
||||
}
|
||||
|
||||
func getAppStep(appName string, appStepMap map[string]int) int {
|
||||
@@ -1140,81 +1120,112 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
|
||||
|
||||
for _, app := range applications {
|
||||
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
|
||||
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)
|
||||
appHealthStatus := app.Status.Health.Status
|
||||
appSyncStatus := app.Status.Sync.Status
|
||||
|
||||
currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}
|
||||
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)
|
||||
if idx == -1 {
|
||||
// AppStatus not found, set default status of "Waiting"
|
||||
currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
|
||||
Application: app.Name,
|
||||
TargetRevisions: app.Status.GetRevisions(),
|
||||
LastTransitionTime: &now,
|
||||
Message: "No Application status found, defaulting status to Waiting.",
|
||||
Status: "Waiting",
|
||||
Message: "No Application status found, defaulting status to Waiting",
|
||||
Status: argov1alpha1.ProgressiveSyncWaiting,
|
||||
Step: strconv.Itoa(getAppStep(app.Name, appStepMap)),
|
||||
}
|
||||
} else {
|
||||
// we have an existing AppStatus
|
||||
currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
|
||||
if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
|
||||
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
|
||||
}
|
||||
}
|
||||
|
||||
statusLogCtx := logCtx.WithFields(log.Fields{
|
||||
"app.name": currentAppStatus.Application,
|
||||
"app.health": appHealthStatus,
|
||||
"app.sync": appSyncStatus,
|
||||
"status.status": currentAppStatus.Status,
|
||||
"status.message": currentAppStatus.Message,
|
||||
"status.step": currentAppStatus.Step,
|
||||
"status.targetRevisions": strings.Join(currentAppStatus.TargetRevisions, ","),
|
||||
})
|
||||
|
||||
newAppStatus := currentAppStatus.DeepCopy()
|
||||
newAppStatus.Step = strconv.Itoa(getAppStep(newAppStatus.Application, appStepMap))
|
||||
|
||||
if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
|
||||
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
|
||||
currentAppStatus.Status = "Waiting"
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
// A new version is available in the application and we need to re-sync the application
|
||||
newAppStatus.TargetRevisions = app.Status.GetRevisions()
|
||||
newAppStatus.Message = "Application has pending changes, setting status to Waiting"
|
||||
newAppStatus.Status = argov1alpha1.ProgressiveSyncWaiting
|
||||
newAppStatus.LastTransitionTime = &now
|
||||
}
|
||||
|
||||
appOutdated := false
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
appOutdated = syncStatusString == "OutOfSync"
|
||||
}
|
||||
if newAppStatus.Status == argov1alpha1.ProgressiveSyncWaiting {
|
||||
// App has changed to waiting because the TargetRevisions changed or it is a new selected app
|
||||
// This does not mean we should always sync the app. The app may not be OutOfSync
|
||||
// and may not require a sync if it does not have differences.
|
||||
if appSyncStatus == argov1alpha1.SyncStatusCodeSynced {
|
||||
if app.Status.Health.Status == health.HealthStatusHealthy {
|
||||
newAppStatus.LastTransitionTime = &now
|
||||
newAppStatus.Status = argov1alpha1.ProgressiveSyncHealthy
|
||||
newAppStatus.Message = "Application resource has synced, updating status to Healthy"
|
||||
} else {
|
||||
newAppStatus.LastTransitionTime = &now
|
||||
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
|
||||
newAppStatus.Message = "Application resource has synced, updating status to Progressing"
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The target revision is the same, so we need to evaluate the current revision progress
|
||||
if currentAppStatus.Status == argov1alpha1.ProgressiveSyncPending {
|
||||
// No need to evaluate status health further if the application did not change since our last transition
|
||||
if app.Status.ReconciledAt == nil || (newAppStatus.LastTransitionTime != nil && app.Status.ReconciledAt.After(newAppStatus.LastTransitionTime.Time)) {
|
||||
// Validate that at least one sync was trigerred after the pending transition time
|
||||
if app.Status.OperationState != nil && app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
|
||||
statusLogCtx = statusLogCtx.WithField("app.operation", app.Status.OperationState.Phase)
|
||||
newAppStatus.LastTransitionTime = &now
|
||||
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
|
||||
|
||||
if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
|
||||
logCtx.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Waiting"
|
||||
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
|
||||
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
}
|
||||
switch {
|
||||
case app.Status.OperationState.Phase.Successful():
|
||||
newAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing"
|
||||
case app.Status.OperationState.Phase.Completed():
|
||||
newAppStatus.Message = "Application resource completed a sync, updating status from Pending to Progressing"
|
||||
default:
|
||||
// If a sync fails or has errors, the Application should be configured with retry. It is not the appset's job to retry failed syncs
|
||||
newAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing"
|
||||
}
|
||||
} else if isApplicationWithError(app) {
|
||||
// Validate if the application has errors preventing it to be reconciled and perform syncs
|
||||
// If it does, we move it to progressing.
|
||||
newAppStatus.LastTransitionTime = &now
|
||||
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
|
||||
newAppStatus.Message = "Application resource has error and cannot sync, updating status to Progressing"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Pending" {
|
||||
if !appOutdated && operationPhaseString == "Succeeded" {
|
||||
logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Progressing"
|
||||
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
|
||||
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
|
||||
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Progressing"
|
||||
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
|
||||
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
if currentAppStatus.Status == argov1alpha1.ProgressiveSyncProgressing {
|
||||
// If the status has reached progressing, we know a sync has been triggered. No matter the result of that operation,
|
||||
// we want an the app to reach the Healthy state for the current revision.
|
||||
if appHealthStatus == health.HealthStatusHealthy && appSyncStatus == argov1alpha1.SyncStatusCodeSynced {
|
||||
newAppStatus.LastTransitionTime = &now
|
||||
newAppStatus.Status = argov1alpha1.ProgressiveSyncHealthy
|
||||
newAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
|
||||
logCtx.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
|
||||
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
if newAppStatus.LastTransitionTime == &now {
|
||||
statusLogCtx.WithFields(log.Fields{
|
||||
"new_status.status": newAppStatus.Status,
|
||||
"new_status.message": newAppStatus.Message,
|
||||
"new_status.step": newAppStatus.Step,
|
||||
"new_status.targetRevisions": strings.Join(newAppStatus.TargetRevisions, ","),
|
||||
}).Info("Progressive sync application changed status")
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
|
||||
logCtx.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
|
||||
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
}
|
||||
|
||||
appStatuses = append(appStatuses, currentAppStatus)
|
||||
appStatuses = append(appStatuses, *newAppStatus)
|
||||
}
|
||||
|
||||
err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
|
||||
@@ -1226,7 +1237,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
}
|
||||
|
||||
// check Applications that are in Waiting status and promote them to Pending if needed
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appsToSync map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
now := metav1.Now()
|
||||
|
||||
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
|
||||
@@ -1242,12 +1253,20 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
totalCountMap[appStepMap[appStatus.Application]]++
|
||||
|
||||
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
|
||||
if appStatus.Status == argov1alpha1.ProgressiveSyncPending || appStatus.Status == argov1alpha1.ProgressiveSyncProgressing {
|
||||
updateCountMap[appStepMap[appStatus.Application]]++
|
||||
}
|
||||
}
|
||||
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
statusLogCtx := logCtx.WithFields(log.Fields{
|
||||
"app.name": appStatus.Application,
|
||||
"status.status": appStatus.Status,
|
||||
"status.message": appStatus.Message,
|
||||
"status.step": appStatus.Step,
|
||||
"status.targetRevisions": strings.Join(appStatus.TargetRevisions, ","),
|
||||
})
|
||||
|
||||
maxUpdateAllowed := true
|
||||
maxUpdate := &intstr.IntOrString{}
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
@@ -1258,7 +1277,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
if maxUpdate != nil {
|
||||
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
|
||||
if err != nil {
|
||||
logCtx.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
|
||||
statusLogCtx.Warnf("AppSet has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", maxUpdate, err)
|
||||
}
|
||||
|
||||
// ensure that percentage values greater than 0% always result in at least 1 Application being selected
|
||||
@@ -1268,16 +1287,21 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
|
||||
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
|
||||
maxUpdateAllowed = false
|
||||
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap), applicationSet.Name)
|
||||
statusLogCtx.Infof("Application is not allowed to update yet, %v/%v Applications already updating in step %v", updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap))
|
||||
}
|
||||
}
|
||||
|
||||
if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
|
||||
logCtx.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
|
||||
if appStatus.Status == argov1alpha1.ProgressiveSyncWaiting && appsToSync[appStatus.Application] && maxUpdateAllowed {
|
||||
appStatus.LastTransitionTime = &now
|
||||
appStatus.Status = "Pending"
|
||||
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
|
||||
appStatus.Step = strconv.Itoa(getAppStep(appStatus.Application, appStepMap))
|
||||
appStatus.Status = argov1alpha1.ProgressiveSyncPending
|
||||
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing"
|
||||
|
||||
statusLogCtx.WithFields(log.Fields{
|
||||
"new_status.status": appStatus.Status,
|
||||
"new_status.message": appStatus.Message,
|
||||
"new_status.step": appStatus.Step,
|
||||
"new_status.targetRevisions": strings.Join(appStatus.TargetRevisions, ","),
|
||||
}).Info("Progressive sync application changed status")
|
||||
|
||||
updateCountMap[appStepMap[appStatus.Application]]++
|
||||
}
|
||||
@@ -1302,9 +1326,9 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditio
|
||||
completedWaves := map[string]bool{}
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
if v, ok := completedWaves[appStatus.Step]; !ok {
|
||||
completedWaves[appStatus.Step] = appStatus.Status == "Healthy"
|
||||
completedWaves[appStatus.Step] = appStatus.Status == argov1alpha1.ProgressiveSyncHealthy
|
||||
} else {
|
||||
completedWaves[appStatus.Step] = v && appStatus.Status == "Healthy"
|
||||
completedWaves[appStatus.Step] = v && appStatus.Status == argov1alpha1.ProgressiveSyncHealthy
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1410,13 +1434,7 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
|
||||
sort.Slice(statuses, func(i, j int) bool {
|
||||
return statuses[i].Name < statuses[j].Name
|
||||
})
|
||||
resourcesCount := int64(len(statuses))
|
||||
if r.MaxResourcesStatusCount > 0 && len(statuses) > r.MaxResourcesStatusCount {
|
||||
logCtx.Warnf("Truncating ApplicationSet %s resource status from %d to max allowed %d entries", appset.Name, len(statuses), r.MaxResourcesStatusCount)
|
||||
statuses = statuses[:r.MaxResourcesStatusCount]
|
||||
}
|
||||
appset.Status.Resources = statuses
|
||||
appset.Status.ResourcesCount = resourcesCount
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
|
||||
@@ -1429,7 +1447,6 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
|
||||
}
|
||||
|
||||
updatedAppset.Status.Resources = appset.Status.Resources
|
||||
updatedAppset.Status.ResourcesCount = resourcesCount
|
||||
|
||||
// Update the newly fetched object with new status resources
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
@@ -1526,30 +1543,31 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) []argov1alpha1.Application {
|
||||
func (r *ApplicationSetReconciler) syncDesiredApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appsToSync map[string]bool, desiredApplications []argov1alpha1.Application) []argov1alpha1.Application {
|
||||
rolloutApps := []argov1alpha1.Application{}
|
||||
for i := range validApps {
|
||||
for i := range desiredApplications {
|
||||
pruneEnabled := false
|
||||
|
||||
// ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead
|
||||
if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.IsAutomatedSyncEnabled() {
|
||||
pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune
|
||||
validApps[i].Spec.SyncPolicy.Automated = nil
|
||||
if desiredApplications[i].Spec.SyncPolicy != nil && desiredApplications[i].Spec.SyncPolicy.IsAutomatedSyncEnabled() {
|
||||
pruneEnabled = desiredApplications[i].Spec.SyncPolicy.Automated.Prune
|
||||
desiredApplications[i].Spec.SyncPolicy.Automated.Enabled = ptr.To(false)
|
||||
}
|
||||
|
||||
appSetStatusPending := false
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name)
|
||||
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" {
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, desiredApplications[i].Name)
|
||||
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == argov1alpha1.ProgressiveSyncPending {
|
||||
// only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate
|
||||
appSetStatusPending = true
|
||||
}
|
||||
|
||||
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
|
||||
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
|
||||
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
|
||||
validApps[i] = syncApplication(validApps[i], pruneEnabled)
|
||||
// check appsToSync to determine which Applications are ready to be updated and which should be skipped
|
||||
if appsToSync[desiredApplications[i].Name] && appSetStatusPending {
|
||||
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", desiredApplications[i].Name, pruneEnabled)
|
||||
desiredApplications[i] = syncApplication(desiredApplications[i], pruneEnabled)
|
||||
}
|
||||
rolloutApps = append(rolloutApps, validApps[i])
|
||||
|
||||
rolloutApps = append(rolloutApps, desiredApplications[i])
|
||||
}
|
||||
return rolloutApps
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -26,14 +26,10 @@ import (
|
||||
"github.com/go-playground/webhooks/v6/github"
|
||||
"github.com/go-playground/webhooks/v6/gitlab"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/guard"
|
||||
)
|
||||
|
||||
const payloadQueueSize = 50000
|
||||
|
||||
const panicMsgAppSet = "panic while processing applicationset-controller webhook event"
|
||||
|
||||
type WebhookHandler struct {
|
||||
sync.WaitGroup // for testing
|
||||
github *github.Webhook
|
||||
@@ -106,7 +102,6 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S
|
||||
}
|
||||
|
||||
func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
|
||||
compLog := log.WithField("component", "applicationset-webhook")
|
||||
for i := 0; i < webhookParallelism; i++ {
|
||||
h.Add(1)
|
||||
go func() {
|
||||
@@ -116,7 +111,7 @@ func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
guard.RecoverAndLog(func() { h.HandleEvent(payload) }, compLog, panicMsgAppSet)
|
||||
h.HandleEvent(payload)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
11
assets/swagger.json
generated
11
assets/swagger.json
generated
@@ -7077,7 +7077,7 @@
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource"
|
||||
},
|
||||
"step": {
|
||||
"type": "string",
|
||||
@@ -7322,11 +7322,6 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/applicationv1alpha1ResourceStatus"
|
||||
}
|
||||
},
|
||||
"resourcesCount": {
|
||||
"description": "ResourcesCount is the total number of resources managed by this application set. The count may be higher than actual number of items in the Resources field when\nthe number of managed resources exceeds the limit imposed by the controller (to avoid making the status field too large).",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -10577,8 +10572,8 @@
|
||||
"type": "string"
|
||||
},
|
||||
"targetBranch": {
|
||||
"description": "TargetBranch is the branch from which hydrated manifests will be synced.\nIf HydrateTo is not set, this is also the branch to which hydrated manifests are committed.",
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"title": "TargetBranch is the branch to which hydrated manifests should be committed"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
logutils "github.com/argoproj/argo-cd/v3/util/log"
|
||||
"github.com/argoproj/argo-cd/v3/util/profile"
|
||||
"github.com/argoproj/argo-cd/v3/util/tls"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/controllers"
|
||||
@@ -80,7 +79,6 @@ func NewCommand() *cobra.Command {
|
||||
enableScmProviders bool
|
||||
webhookParallelism int
|
||||
tokenRefStrictMode bool
|
||||
maxResourcesStatusCount int
|
||||
)
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
@@ -171,15 +169,6 @@ func NewCommand() *cobra.Command {
|
||||
log.Error(err, "unable to start manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
pprofMux := http.NewServeMux()
|
||||
profile.RegisterProfiler(pprofMux)
|
||||
// This looks a little strange. Eg, not using ctrl.Options PprofBindAddress and then adding the pprof mux
|
||||
// to the metrics server. However, it allows for the controller to dynamically expose the pprof endpoints
|
||||
// and use the existing metrics server, the same pattern that the application controller and api-server follow.
|
||||
if err = mgr.AddMetricsServerExtraHandler("/debug/pprof/", pprofMux); err != nil {
|
||||
log.Error(err, "failed to register pprof handlers")
|
||||
}
|
||||
dynamicClient, err := dynamic.NewForConfig(mgr.GetConfig())
|
||||
errors.CheckError(err)
|
||||
k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig())
|
||||
@@ -242,7 +231,6 @@ func NewCommand() *cobra.Command {
|
||||
GlobalPreservedAnnotations: globalPreservedAnnotations,
|
||||
GlobalPreservedLabels: globalPreservedLabels,
|
||||
Metrics: &metrics,
|
||||
MaxResourcesStatusCount: maxResourcesStatusCount,
|
||||
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
@@ -287,7 +275,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
|
||||
command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
|
||||
command.Flags().BoolVar(&enableGitHubAPIMetrics, "enable-github-api-metrics", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_GITHUB_API_METRICS", false), "Enable GitHub API metrics for generators that use the GitHub API")
|
||||
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 0, 0, math.MaxInt), "Max number of resources stored in appset status.")
|
||||
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -80,7 +80,6 @@ func NewCommand() *cobra.Command {
|
||||
includeHiddenDirectories bool
|
||||
cmpUseManifestGeneratePaths bool
|
||||
ociMediaTypes []string
|
||||
enableBuiltinGitConfig bool
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -156,7 +155,6 @@ func NewCommand() *cobra.Command {
|
||||
IncludeHiddenDirectories: includeHiddenDirectories,
|
||||
CMPUseManifestGeneratePaths: cmpUseManifestGeneratePaths,
|
||||
OCIMediaTypes: ociMediaTypes,
|
||||
EnableBuiltinGitConfig: enableBuiltinGitConfig,
|
||||
}, askPassServer)
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -266,7 +264,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&includeHiddenDirectories, "include-hidden-directories", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_INCLUDE_HIDDEN_DIRECTORIES", false), "Include hidden directories from Git")
|
||||
command.Flags().BoolVar(&cmpUseManifestGeneratePaths, "plugin-use-manifest-generate-paths", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_PLUGIN_USE_MANIFEST_GENERATE_PATHS", false), "Pass the resources described in argocd.argoproj.io/manifest-generate-paths value to the cmpserver to generate the application manifests.")
|
||||
command.Flags().StringSliceVar(&ociMediaTypes, "oci-layer-media-types", env.StringsFromEnv("ARGOCD_REPO_SERVER_OCI_LAYER_MEDIA_TYPES", []string{"application/vnd.oci.image.layer.v1.tar", "application/vnd.oci.image.layer.v1.tar+gzip", "application/vnd.cncf.helm.chart.content.v1.tar+gzip"}, ","), "Comma separated list of allowed media types for OCI media types. This only accounts for media types within layers.")
|
||||
command.Flags().BoolVar(&enableBuiltinGitConfig, "enable-builtin-git-config", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG", true), "Enable builtin git configuration options that are required for correct argocd-repo-server operation.")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
|
||||
OnClientCreated: func(client *redis.Client) {
|
||||
|
||||
@@ -30,12 +30,11 @@ func NewNotificationsCommand() *cobra.Command {
|
||||
)
|
||||
|
||||
var argocdService service.Service
|
||||
|
||||
toolsCommand := cmd.NewToolsCommand(
|
||||
"notifications",
|
||||
"argocd admin notifications",
|
||||
applications,
|
||||
settings.GetFactorySettingsForCLI(func() service.Service { return argocdService }, "argocd-notifications-secret", "argocd-notifications-cm", false),
|
||||
settings.GetFactorySettingsForCLI(argocdService, "argocd-notifications-secret", "argocd-notifications-cm", false),
|
||||
func(clientConfig clientcmd.ClientConfig) {
|
||||
k8sCfg, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
|
||||
97
commitserver/apiclient/mocks/Clientset.go
generated
97
commitserver/apiclient/mocks/Clientset.go
generated
@@ -1,14 +1,101 @@
|
||||
// Code generated by mockery; DO NOT EDIT.
|
||||
// github.com/vektra/mockery
|
||||
// template: testify
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
"github.com/argoproj/argo-cd/v3/util/io"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
type Clientset struct {
|
||||
CommitServiceClient apiclient.CommitServiceClient
|
||||
// NewClientset creates a new instance of Clientset. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewClientset(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *Clientset {
|
||||
mock := &Clientset{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
func (c *Clientset) NewCommitServerClient() (utilio.Closer, apiclient.CommitServiceClient, error) {
|
||||
return utilio.NopCloser, c.CommitServiceClient, nil
|
||||
// Clientset is an autogenerated mock type for the Clientset type
|
||||
type Clientset struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type Clientset_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *Clientset) EXPECT() *Clientset_Expecter {
|
||||
return &Clientset_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// NewCommitServerClient provides a mock function for the type Clientset
|
||||
func (_mock *Clientset) NewCommitServerClient() (io.Closer, apiclient.CommitServiceClient, error) {
|
||||
ret := _mock.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for NewCommitServerClient")
|
||||
}
|
||||
|
||||
var r0 io.Closer
|
||||
var r1 apiclient.CommitServiceClient
|
||||
var r2 error
|
||||
if returnFunc, ok := ret.Get(0).(func() (io.Closer, apiclient.CommitServiceClient, error)); ok {
|
||||
return returnFunc()
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func() io.Closer); ok {
|
||||
r0 = returnFunc()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(io.Closer)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func() apiclient.CommitServiceClient); ok {
|
||||
r1 = returnFunc()
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(apiclient.CommitServiceClient)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(2).(func() error); ok {
|
||||
r2 = returnFunc()
|
||||
} else {
|
||||
r2 = ret.Error(2)
|
||||
}
|
||||
return r0, r1, r2
|
||||
}
|
||||
|
||||
// Clientset_NewCommitServerClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewCommitServerClient'
|
||||
type Clientset_NewCommitServerClient_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// NewCommitServerClient is a helper method to define mock.On call
|
||||
func (_e *Clientset_Expecter) NewCommitServerClient() *Clientset_NewCommitServerClient_Call {
|
||||
return &Clientset_NewCommitServerClient_Call{Call: _e.mock.On("NewCommitServerClient")}
|
||||
}
|
||||
|
||||
func (_c *Clientset_NewCommitServerClient_Call) Run(run func()) *Clientset_NewCommitServerClient_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Clientset_NewCommitServerClient_Call) Return(closer io.Closer, commitServiceClient apiclient.CommitServiceClient, err error) *Clientset_NewCommitServerClient_Call {
|
||||
_c.Call.Return(closer, commitServiceClient, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Clientset_NewCommitServerClient_Call) RunAndReturn(run func() (io.Closer, apiclient.CommitServiceClient, error)) *Clientset_NewCommitServerClient_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
@@ -1878,7 +1878,7 @@ func (ctrl *ApplicationController) processAppHydrateQueueItem() (processNext boo
|
||||
return
|
||||
}
|
||||
|
||||
ctrl.hydrator.ProcessAppHydrateQueueItem(origApp.DeepCopy())
|
||||
ctrl.hydrator.ProcessAppHydrateQueueItem(origApp)
|
||||
|
||||
log.WithFields(applog.GetAppLogFields(origApp)).Debug("Successfully processed app hydrate queue item")
|
||||
return
|
||||
|
||||
@@ -4,9 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -101,41 +99,47 @@ func NewHydrator(dependencies Dependencies, statusRefreshTimeout time.Duration,
|
||||
// It's likely that multiple applications will trigger hydration at the same time. The hydration queue key is meant to
|
||||
// dedupe these requests.
|
||||
func (h *Hydrator) ProcessAppHydrateQueueItem(origApp *appv1.Application) {
|
||||
origApp = origApp.DeepCopy()
|
||||
app := origApp.DeepCopy()
|
||||
|
||||
if app.Spec.SourceHydrator == nil {
|
||||
return
|
||||
}
|
||||
|
||||
logCtx := log.WithFields(applog.GetAppLogFields(app))
|
||||
|
||||
logCtx.Debug("Processing app hydrate queue item")
|
||||
|
||||
needsHydration, reason := appNeedsHydration(app)
|
||||
if needsHydration {
|
||||
app.Status.SourceHydrator.CurrentOperation = &appv1.HydrateOperation{
|
||||
StartedAt: metav1.Now(),
|
||||
FinishedAt: nil,
|
||||
Phase: appv1.HydrateOperationPhaseHydrating,
|
||||
SourceHydrator: *app.Spec.SourceHydrator,
|
||||
}
|
||||
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
|
||||
// TODO: don't reuse statusRefreshTimeout. Create a new timeout for hydration.
|
||||
needsHydration, reason := appNeedsHydration(origApp, h.statusRefreshTimeout)
|
||||
if !needsHydration {
|
||||
return
|
||||
}
|
||||
|
||||
needsRefresh := app.Status.SourceHydrator.CurrentOperation.Phase == appv1.HydrateOperationPhaseHydrating && metav1.Now().Sub(app.Status.SourceHydrator.CurrentOperation.StartedAt.Time) > h.statusRefreshTimeout
|
||||
if needsHydration || needsRefresh {
|
||||
logCtx.WithField("reason", reason).Info("Hydrating app")
|
||||
h.dependencies.AddHydrationQueueItem(getHydrationQueueKey(app))
|
||||
} else {
|
||||
logCtx.WithField("reason", reason).Debug("Skipping hydration")
|
||||
logCtx.WithField("reason", reason).Info("Hydrating app")
|
||||
|
||||
app.Status.SourceHydrator.CurrentOperation = &appv1.HydrateOperation{
|
||||
StartedAt: metav1.Now(),
|
||||
FinishedAt: nil,
|
||||
Phase: appv1.HydrateOperationPhaseHydrating,
|
||||
SourceHydrator: *app.Spec.SourceHydrator,
|
||||
}
|
||||
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
|
||||
origApp.Status.SourceHydrator = app.Status.SourceHydrator
|
||||
h.dependencies.AddHydrationQueueItem(getHydrationQueueKey(app))
|
||||
|
||||
logCtx.Debug("Successfully processed app hydrate queue item")
|
||||
}
|
||||
|
||||
func getHydrationQueueKey(app *appv1.Application) types.HydrationQueueKey {
|
||||
destinationBranch := app.Spec.SourceHydrator.SyncSource.TargetBranch
|
||||
if app.Spec.SourceHydrator.HydrateTo != nil {
|
||||
destinationBranch = app.Spec.SourceHydrator.HydrateTo.TargetBranch
|
||||
}
|
||||
key := types.HydrationQueueKey{
|
||||
SourceRepoURL: git.NormalizeGitURLAllowInvalid(app.Spec.SourceHydrator.DrySource.RepoURL),
|
||||
SourceTargetRevision: app.Spec.SourceHydrator.DrySource.TargetRevision,
|
||||
DestinationBranch: app.Spec.GetHydrateToSource().TargetRevision,
|
||||
DestinationBranch: destinationBranch,
|
||||
}
|
||||
return key
|
||||
}
|
||||
@@ -144,92 +148,43 @@ func getHydrationQueueKey(app *appv1.Application) types.HydrationQueueKey {
|
||||
// hydration key, hydrates their latest commit, and updates their status accordingly. If the hydration fails, it marks
|
||||
// the operation as failed and logs the error. If successful, it updates the operation to indicate that hydration was
|
||||
// successful and requests a refresh of the applications to pick up the new hydrated commit.
|
||||
func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey types.HydrationQueueKey) {
|
||||
func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey types.HydrationQueueKey) (processNext bool) {
|
||||
logCtx := log.WithFields(log.Fields{
|
||||
"sourceRepoURL": hydrationKey.SourceRepoURL,
|
||||
"sourceTargetRevision": hydrationKey.SourceTargetRevision,
|
||||
"destinationBranch": hydrationKey.DestinationBranch,
|
||||
})
|
||||
|
||||
// Get all applications sharing the same hydration key
|
||||
apps, err := h.getAppsForHydrationKey(hydrationKey)
|
||||
if err != nil {
|
||||
// If we get an error here, we cannot proceed with hydration and we do not know
|
||||
// which apps to update with the failure. The best we can do is log an error in
|
||||
// the controller and wait for statusRefreshTimeout to retry
|
||||
logCtx.WithError(err).Error("failed to get apps for hydration")
|
||||
relevantApps, drySHA, hydratedSHA, err := h.hydrateAppsLatestCommit(logCtx, hydrationKey)
|
||||
if len(relevantApps) == 0 {
|
||||
// return early if there are no relevant apps found to hydrate
|
||||
// otherwise you'll be stuck in hydrating
|
||||
logCtx.Info("Skipping hydration since there are no relevant apps found to hydrate")
|
||||
return
|
||||
}
|
||||
logCtx.WithField("appCount", len(apps))
|
||||
|
||||
// FIXME: we might end up in a race condition here where an HydrationQueueItem is processed
|
||||
// before all applications had their CurrentOperation set by ProcessAppHydrateQueueItem.
|
||||
// This would cause this method to update "old" CurrentOperation.
|
||||
// It should only start hydration if all apps are in the HydrateOperationPhaseHydrating phase.
|
||||
raceDetected := false
|
||||
for _, app := range apps {
|
||||
if app.Status.SourceHydrator.CurrentOperation == nil || app.Status.SourceHydrator.CurrentOperation.Phase != appv1.HydrateOperationPhaseHydrating {
|
||||
raceDetected = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if raceDetected {
|
||||
logCtx.Warn("race condition detected: not all apps are in HydrateOperationPhaseHydrating phase")
|
||||
}
|
||||
|
||||
// validate all the applications to make sure they are all correctly configured.
|
||||
// All applications sharing the same hydration key must succeed for the hydration to be processed.
|
||||
projects, validationErrors := h.validateApplications(apps)
|
||||
if len(validationErrors) > 0 {
|
||||
// For the applications that have an error, set the specific error in their status.
|
||||
// Applications without error will still fail with a generic error since the hydration cannot be partial
|
||||
genericError := genericHydrationError(validationErrors)
|
||||
for _, app := range apps {
|
||||
if err, ok := validationErrors[app.QualifiedName()]; ok {
|
||||
logCtx = logCtx.WithFields(applog.GetAppLogFields(app))
|
||||
logCtx.Errorf("failed to validate hydration app: %v", err)
|
||||
h.setAppHydratorError(app, err)
|
||||
} else {
|
||||
h.setAppHydratorError(app, genericError)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Hydrate all the apps
|
||||
drySHA, hydratedSHA, appErrors, err := h.hydrate(logCtx, apps, projects)
|
||||
if err != nil {
|
||||
// If there is a single error, it affects each applications
|
||||
for i := range apps {
|
||||
appErrors[apps[i].QualifiedName()] = err
|
||||
}
|
||||
}
|
||||
if drySHA != "" {
|
||||
logCtx = logCtx.WithField("drySHA", drySHA)
|
||||
}
|
||||
if len(appErrors) > 0 {
|
||||
// For the applications that have an error, set the specific error in their status.
|
||||
// Applications without error will still fail with a generic error since the hydration cannot be partial
|
||||
genericError := genericHydrationError(appErrors)
|
||||
for _, app := range apps {
|
||||
if drySHA != "" {
|
||||
// If we have a drySHA, we can set it on the app status
|
||||
app.Status.SourceHydrator.CurrentOperation.DrySHA = drySHA
|
||||
}
|
||||
if err, ok := appErrors[app.QualifiedName()]; ok {
|
||||
logCtx = logCtx.WithFields(applog.GetAppLogFields(app))
|
||||
logCtx.Errorf("failed to hydrate app: %v", err)
|
||||
h.setAppHydratorError(app, err)
|
||||
} else {
|
||||
h.setAppHydratorError(app, genericError)
|
||||
}
|
||||
if err != nil {
|
||||
logCtx.WithField("appCount", len(relevantApps)).WithError(err).Error("Failed to hydrate apps")
|
||||
for _, app := range relevantApps {
|
||||
origApp := app.DeepCopy()
|
||||
app.Status.SourceHydrator.CurrentOperation.Phase = appv1.HydrateOperationPhaseFailed
|
||||
failedAt := metav1.Now()
|
||||
app.Status.SourceHydrator.CurrentOperation.FinishedAt = &failedAt
|
||||
app.Status.SourceHydrator.CurrentOperation.Message = fmt.Sprintf("Failed to hydrate revision %q: %v", drySHA, err.Error())
|
||||
// We may or may not have gotten far enough in the hydration process to get a non-empty SHA, but set it just
|
||||
// in case we did.
|
||||
app.Status.SourceHydrator.CurrentOperation.DrySHA = drySHA
|
||||
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
|
||||
logCtx = logCtx.WithFields(applog.GetAppLogFields(app))
|
||||
logCtx.Errorf("Failed to hydrate app: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
logCtx.Debug("Successfully hydrated apps")
|
||||
logCtx.WithField("appCount", len(relevantApps)).Debug("Successfully hydrated apps")
|
||||
finishedAt := metav1.Now()
|
||||
for _, app := range apps {
|
||||
for _, app := range relevantApps {
|
||||
origApp := app.DeepCopy()
|
||||
operation := &appv1.HydrateOperation{
|
||||
StartedAt: app.Status.SourceHydrator.CurrentOperation.StartedAt,
|
||||
@@ -247,123 +202,118 @@ func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey types.HydrationQueueKe
|
||||
SourceHydrator: app.Status.SourceHydrator.CurrentOperation.SourceHydrator,
|
||||
}
|
||||
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
|
||||
|
||||
// Request a refresh since we pushed a new commit.
|
||||
err := h.dependencies.RequestAppRefresh(app.Name, app.Namespace)
|
||||
if err != nil {
|
||||
logCtx.WithFields(applog.GetAppLogFields(app)).WithError(err).Error("Failed to request app refresh after hydration")
|
||||
logCtx.WithField("app", app.QualifiedName()).WithError(err).Error("Failed to request app refresh after hydration")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// setAppHydratorError updates the CurrentOperation with the error information.
|
||||
func (h *Hydrator) setAppHydratorError(app *appv1.Application, err error) {
|
||||
// if the operation is not in progress, we do not update the status
|
||||
if app.Status.SourceHydrator.CurrentOperation.Phase != appv1.HydrateOperationPhaseHydrating {
|
||||
return
|
||||
func (h *Hydrator) hydrateAppsLatestCommit(logCtx *log.Entry, hydrationKey types.HydrationQueueKey) ([]*appv1.Application, string, string, error) {
|
||||
relevantApps, projects, err := h.getRelevantAppsAndProjectsForHydration(logCtx, hydrationKey)
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("failed to get relevant apps for hydration: %w", err)
|
||||
}
|
||||
|
||||
origApp := app.DeepCopy()
|
||||
app.Status.SourceHydrator.CurrentOperation.Phase = appv1.HydrateOperationPhaseFailed
|
||||
failedAt := metav1.Now()
|
||||
app.Status.SourceHydrator.CurrentOperation.FinishedAt = &failedAt
|
||||
app.Status.SourceHydrator.CurrentOperation.Message = fmt.Sprintf("Failed to hydrate: %v", err.Error())
|
||||
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
|
||||
dryRevision, hydratedRevision, err := h.hydrate(logCtx, relevantApps, projects)
|
||||
if err != nil {
|
||||
return relevantApps, dryRevision, "", fmt.Errorf("failed to hydrate apps: %w", err)
|
||||
}
|
||||
|
||||
return relevantApps, dryRevision, hydratedRevision, nil
|
||||
}
|
||||
|
||||
// getAppsForHydrationKey returns the applications matching the hydration key.
|
||||
func (h *Hydrator) getAppsForHydrationKey(hydrationKey types.HydrationQueueKey) ([]*appv1.Application, error) {
|
||||
func (h *Hydrator) getRelevantAppsAndProjectsForHydration(logCtx *log.Entry, hydrationKey types.HydrationQueueKey) ([]*appv1.Application, map[string]*appv1.AppProject, error) {
|
||||
// Get all apps
|
||||
apps, err := h.dependencies.GetProcessableApps()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list apps: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to list apps: %w", err)
|
||||
}
|
||||
|
||||
var relevantApps []*appv1.Application
|
||||
projects := make(map[string]*appv1.AppProject)
|
||||
uniquePaths := make(map[string]bool, len(apps.Items))
|
||||
for _, app := range apps.Items {
|
||||
if app.Spec.SourceHydrator == nil {
|
||||
continue
|
||||
}
|
||||
appKey := getHydrationQueueKey(&app)
|
||||
if appKey != hydrationKey {
|
||||
|
||||
if !git.SameURL(app.Spec.SourceHydrator.DrySource.RepoURL, hydrationKey.SourceRepoURL) ||
|
||||
app.Spec.SourceHydrator.DrySource.TargetRevision != hydrationKey.SourceTargetRevision {
|
||||
continue
|
||||
}
|
||||
relevantApps = append(relevantApps, &app)
|
||||
}
|
||||
return relevantApps, nil
|
||||
}
|
||||
|
||||
// validateApplications checks that all applications are valid for hydration.
|
||||
func (h *Hydrator) validateApplications(apps []*appv1.Application) (map[string]*appv1.AppProject, map[string]error) {
|
||||
projects := make(map[string]*appv1.AppProject)
|
||||
errors := make(map[string]error)
|
||||
uniquePaths := make(map[string]string, len(apps))
|
||||
|
||||
for _, app := range apps {
|
||||
// Get the project for the app and validate if the app is allowed to use the source.
|
||||
// We can't short-circuit this even if we have seen this project before, because we need to verify that this
|
||||
// particular app is allowed to use this project.
|
||||
proj, err := h.dependencies.GetProcessableAppProj(app)
|
||||
if err != nil {
|
||||
errors[app.QualifiedName()] = fmt.Errorf("failed to get project %q: %w", app.Spec.Project, err)
|
||||
destinationBranch := app.Spec.SourceHydrator.SyncSource.TargetBranch
|
||||
if app.Spec.SourceHydrator.HydrateTo != nil {
|
||||
destinationBranch = app.Spec.SourceHydrator.HydrateTo.TargetBranch
|
||||
}
|
||||
if destinationBranch != hydrationKey.DestinationBranch {
|
||||
continue
|
||||
}
|
||||
|
||||
path := app.Spec.SourceHydrator.SyncSource.Path
|
||||
// ensure that the path is always set to a path that doesn't resolve to the root of the repo
|
||||
if IsRootPath(path) {
|
||||
return nil, nil, fmt.Errorf("app %q has path %q which resolves to repository root", app.QualifiedName(), path)
|
||||
}
|
||||
|
||||
var proj *appv1.AppProject
|
||||
// We can't short-circuit this even if we have seen this project before, because we need to verify that this
|
||||
// particular app is allowed to use this project. That logic is in GetProcessableAppProj.
|
||||
proj, err = h.dependencies.GetProcessableAppProj(&app)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get project %q for app %q: %w", app.Spec.Project, app.QualifiedName(), err)
|
||||
}
|
||||
permitted := proj.IsSourcePermitted(app.Spec.GetSource())
|
||||
if !permitted {
|
||||
errors[app.QualifiedName()] = fmt.Errorf("application repo %s is not permitted in project '%s'", app.Spec.GetSource().RepoURL, proj.Name)
|
||||
// Log and skip. We don't want to fail the entire operation because of one app.
|
||||
logCtx.Warnf("App %q is not permitted to use source %q", app.QualifiedName(), app.Spec.Source.String())
|
||||
continue
|
||||
}
|
||||
projects[app.Spec.Project] = proj
|
||||
|
||||
// Disallow hydrating to the repository root.
|
||||
// Hydrating to root would overwrite or delete files at the top level of the repo,
|
||||
// which can break other applications or shared configuration.
|
||||
// Every hydrated app must write into a subdirectory instead.
|
||||
destPath := app.Spec.SourceHydrator.SyncSource.Path
|
||||
if IsRootPath(destPath) {
|
||||
errors[app.QualifiedName()] = fmt.Errorf("app is configured to hydrate to the repository root (branch %q, path %q) which is not allowed", app.Spec.GetHydrateToSource().TargetRevision, destPath)
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO: test the dupe detection
|
||||
// TODO: normalize the path to avoid "path/.." from being treated as different from "."
|
||||
if appName, ok := uniquePaths[destPath]; ok {
|
||||
errors[app.QualifiedName()] = fmt.Errorf("app %s hydrator use the same destination: %v", appName, app.Spec.SourceHydrator.SyncSource.Path)
|
||||
errors[appName] = fmt.Errorf("app %s hydrator use the same destination: %v", app.QualifiedName(), app.Spec.SourceHydrator.SyncSource.Path)
|
||||
continue
|
||||
if _, ok := uniquePaths[path]; ok {
|
||||
return nil, nil, fmt.Errorf("multiple app hydrators use the same destination: %v", app.Spec.SourceHydrator.SyncSource.Path)
|
||||
}
|
||||
uniquePaths[destPath] = app.QualifiedName()
|
||||
}
|
||||
uniquePaths[path] = true
|
||||
|
||||
// If there are any errors, return nil for projects to avoid possible partial processing.
|
||||
if len(errors) > 0 {
|
||||
projects = nil
|
||||
relevantApps = append(relevantApps, &app)
|
||||
}
|
||||
|
||||
return projects, errors
|
||||
return relevantApps, projects, nil
|
||||
}
|
||||
|
||||
func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application, projects map[string]*appv1.AppProject) (string, string, map[string]error, error) {
|
||||
errors := make(map[string]error)
|
||||
func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application, projects map[string]*appv1.AppProject) (string, string, error) {
|
||||
if len(apps) == 0 {
|
||||
return "", "", nil, nil
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
// These values are the same for all apps being hydrated together, so just get them from the first app.
|
||||
repoURL := apps[0].Spec.GetHydrateToSource().RepoURL
|
||||
targetBranch := apps[0].Spec.GetHydrateToSource().TargetRevision
|
||||
// FIXME: As a convenience, the commit server will create the syncBranch if it does not exist. If the
|
||||
// targetBranch does not exist, it will create it based on the syncBranch. On the next line, we take
|
||||
// the `syncBranch` from the first app and assume that they're all configured the same. Instead, if any
|
||||
// app has a different syncBranch, we should send the commit server an empty string and allow it to
|
||||
// create the targetBranch as an orphan since we can't reliable determine a reasonable base.
|
||||
repoURL := apps[0].Spec.SourceHydrator.DrySource.RepoURL
|
||||
syncBranch := apps[0].Spec.SourceHydrator.SyncSource.TargetBranch
|
||||
targetBranch := apps[0].Spec.GetHydrateToSource().TargetRevision
|
||||
|
||||
// Disallow hydrating to the repository root.
|
||||
// Hydrating to root would overwrite or delete files at the top level of the repo,
|
||||
// which can break other applications or shared configuration.
|
||||
// Every hydrated app must write into a subdirectory instead.
|
||||
|
||||
for _, app := range apps {
|
||||
destPath := app.Spec.SourceHydrator.SyncSource.Path
|
||||
if IsRootPath(destPath) {
|
||||
return "", "", fmt.Errorf(
|
||||
"app %q is configured to hydrate to the repository root (branch %q, path %q) which is not allowed",
|
||||
app.QualifiedName(), targetBranch, destPath,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Get a static SHA revision from the first app so that all apps are hydrated from the same revision.
|
||||
targetRevision, pathDetails, err := h.getManifests(context.Background(), apps[0], "", projects[apps[0].Spec.Project])
|
||||
if err != nil {
|
||||
errors[apps[0].QualifiedName()] = fmt.Errorf("failed to get manifests: %w", err)
|
||||
return "", "", errors, nil
|
||||
return "", "", fmt.Errorf("failed to get manifests for app %q: %w", apps[0].QualifiedName(), err)
|
||||
}
|
||||
paths := []*commitclient.PathDetails{pathDetails}
|
||||
|
||||
@@ -374,18 +324,18 @@ func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application, project
|
||||
app := app
|
||||
eg.Go(func() error {
|
||||
_, pathDetails, err = h.getManifests(ctx, app, targetRevision, projects[app.Spec.Project])
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if err != nil {
|
||||
errors[app.QualifiedName()] = fmt.Errorf("failed to get manifests: %w", err)
|
||||
return errors[app.QualifiedName()]
|
||||
return fmt.Errorf("failed to get manifests for app %q: %w", app.QualifiedName(), err)
|
||||
}
|
||||
mu.Lock()
|
||||
paths = append(paths, pathDetails)
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return targetRevision, "", errors, nil
|
||||
err = eg.Wait()
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to get manifests for apps: %w", err)
|
||||
}
|
||||
|
||||
// If all the apps are under the same project, use that project. Otherwise, use an empty string to indicate that we
|
||||
@@ -394,19 +344,18 @@ func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application, project
|
||||
if len(projects) == 1 {
|
||||
for p := range projects {
|
||||
project = p
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Get the commit metadata for the target revision.
|
||||
revisionMetadata, err := h.getRevisionMetadata(context.Background(), repoURL, project, targetRevision)
|
||||
if err != nil {
|
||||
return targetRevision, "", errors, fmt.Errorf("failed to get revision metadata for %q: %w", targetRevision, err)
|
||||
return "", "", fmt.Errorf("failed to get revision metadata for %q: %w", targetRevision, err)
|
||||
}
|
||||
|
||||
repo, err := h.dependencies.GetWriteCredentials(context.Background(), repoURL, project)
|
||||
if err != nil {
|
||||
return targetRevision, "", errors, fmt.Errorf("failed to get hydrator credentials: %w", err)
|
||||
return "", "", fmt.Errorf("failed to get hydrator credentials: %w", err)
|
||||
}
|
||||
if repo == nil {
|
||||
// Try without credentials.
|
||||
@@ -418,11 +367,11 @@ func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application, project
|
||||
// get the commit message template
|
||||
commitMessageTemplate, err := h.dependencies.GetHydratorCommitMessageTemplate()
|
||||
if err != nil {
|
||||
return targetRevision, "", errors, fmt.Errorf("failed to get hydrated commit message template: %w", err)
|
||||
return "", "", fmt.Errorf("failed to get hydrated commit message template: %w", err)
|
||||
}
|
||||
commitMessage, errMsg := getTemplatedCommitMessage(repoURL, targetRevision, commitMessageTemplate, revisionMetadata)
|
||||
if errMsg != nil {
|
||||
return targetRevision, "", errors, fmt.Errorf("failed to get hydrator commit templated message: %w", errMsg)
|
||||
return "", "", fmt.Errorf("failed to get hydrator commit templated message: %w", errMsg)
|
||||
}
|
||||
|
||||
manifestsRequest := commitclient.CommitHydratedManifestsRequest{
|
||||
@@ -437,14 +386,14 @@ func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application, project
|
||||
|
||||
closer, commitService, err := h.commitClientset.NewCommitServerClient()
|
||||
if err != nil {
|
||||
return targetRevision, "", errors, fmt.Errorf("failed to create commit service: %w", err)
|
||||
return targetRevision, "", fmt.Errorf("failed to create commit service: %w", err)
|
||||
}
|
||||
defer utilio.Close(closer)
|
||||
resp, err := commitService.CommitHydratedManifests(context.Background(), &manifestsRequest)
|
||||
if err != nil {
|
||||
return targetRevision, "", errors, fmt.Errorf("failed to commit hydrated manifests: %w", err)
|
||||
return targetRevision, "", fmt.Errorf("failed to commit hydrated manifests: %w", err)
|
||||
}
|
||||
return targetRevision, resp.HydratedSha, errors, nil
|
||||
return targetRevision, resp.HydratedSha, nil
|
||||
}
|
||||
|
||||
// getManifests gets the manifests for the given application and target revision. It returns the resolved revision
|
||||
@@ -507,27 +456,34 @@ func (h *Hydrator) getRevisionMetadata(ctx context.Context, repoURL, project, re
|
||||
}
|
||||
|
||||
// appNeedsHydration answers if application needs manifests hydrated.
|
||||
func appNeedsHydration(app *appv1.Application) (needsHydration bool, reason string) {
|
||||
switch {
|
||||
case app.Spec.SourceHydrator == nil:
|
||||
func appNeedsHydration(app *appv1.Application, statusHydrateTimeout time.Duration) (needsHydration bool, reason string) {
|
||||
if app.Spec.SourceHydrator == nil {
|
||||
return false, "source hydrator not configured"
|
||||
case app.Status.SourceHydrator.CurrentOperation == nil:
|
||||
return true, "no previous hydrate operation"
|
||||
case app.Status.SourceHydrator.CurrentOperation.Phase == appv1.HydrateOperationPhaseHydrating:
|
||||
return false, "hydration operation already in progress"
|
||||
}
|
||||
|
||||
var hydratedAt *metav1.Time
|
||||
if app.Status.SourceHydrator.CurrentOperation != nil {
|
||||
hydratedAt = &app.Status.SourceHydrator.CurrentOperation.StartedAt
|
||||
}
|
||||
|
||||
switch {
|
||||
case app.IsHydrateRequested():
|
||||
return true, "hydrate requested"
|
||||
case app.Status.SourceHydrator.CurrentOperation == nil:
|
||||
return true, "no previous hydrate operation"
|
||||
case !app.Spec.SourceHydrator.DeepEquals(app.Status.SourceHydrator.CurrentOperation.SourceHydrator):
|
||||
return true, "spec.sourceHydrator differs"
|
||||
case app.Status.SourceHydrator.CurrentOperation.Phase == appv1.HydrateOperationPhaseFailed && metav1.Now().Sub(app.Status.SourceHydrator.CurrentOperation.FinishedAt.Time) > 2*time.Minute:
|
||||
return true, "previous hydrate operation failed more than 2 minutes ago"
|
||||
case hydratedAt == nil || hydratedAt.Add(statusHydrateTimeout).Before(time.Now().UTC()):
|
||||
return true, "hydration expired"
|
||||
}
|
||||
|
||||
return false, "hydration not needed"
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// getTemplatedCommitMessage gets the multi-line commit message based on the template defined in the configmap. It is a two step process:
|
||||
// 1. Get the metadata template engine would use to render the template
|
||||
// Gets the multi-line commit message based on the template defined in the configmap. It is a two step process:
|
||||
// 1. Get the metadata template engine would use to render the template
|
||||
// 2. Pass the output of Step 1 and Step 2 to template Render
|
||||
func getTemplatedCommitMessage(repoURL, revision, commitMessageTemplate string, dryCommitMetadata *appv1.RevisionMetadata) (string, error) {
|
||||
hydratorCommitMetadata, err := hydrator.GetCommitMetadata(repoURL, revision, dryCommitMetadata)
|
||||
@@ -541,20 +497,6 @@ func getTemplatedCommitMessage(repoURL, revision, commitMessageTemplate string,
|
||||
return templatedCommitMsg, nil
|
||||
}
|
||||
|
||||
// genericHydrationError returns an error that summarizes the hydration errors for all applications.
|
||||
func genericHydrationError(validationErrors map[string]error) error {
|
||||
if len(validationErrors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
keys := slices.Sorted(maps.Keys(validationErrors))
|
||||
remainder := "has an error"
|
||||
if len(keys) > 1 {
|
||||
remainder = fmt.Sprintf("and %d more have errors", len(keys)-1)
|
||||
}
|
||||
return fmt.Errorf("cannot hydrate because application %s %s", keys[0], remainder)
|
||||
}
|
||||
|
||||
// IsRootPath returns whether the path references a root path
|
||||
func IsRootPath(path string) bool {
|
||||
clean := filepath.Clean(path)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
113
controller/hydrator/mocks/RepoGetter.go
generated
113
controller/hydrator/mocks/RepoGetter.go
generated
@@ -1,113 +0,0 @@
|
||||
// Code generated by mockery; DO NOT EDIT.
|
||||
// github.com/vektra/mockery
|
||||
// template: testify
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// NewRepoGetter creates a new instance of RepoGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewRepoGetter(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *RepoGetter {
|
||||
mock := &RepoGetter{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
// RepoGetter is an autogenerated mock type for the RepoGetter type
|
||||
type RepoGetter struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type RepoGetter_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *RepoGetter) EXPECT() *RepoGetter_Expecter {
|
||||
return &RepoGetter_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// GetRepository provides a mock function for the type RepoGetter
|
||||
func (_mock *RepoGetter) GetRepository(ctx context.Context, repoURL string, project string) (*v1alpha1.Repository, error) {
|
||||
ret := _mock.Called(ctx, repoURL, project)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetRepository")
|
||||
}
|
||||
|
||||
var r0 *v1alpha1.Repository
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) (*v1alpha1.Repository, error)); ok {
|
||||
return returnFunc(ctx, repoURL, project)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) *v1alpha1.Repository); ok {
|
||||
r0 = returnFunc(ctx, repoURL, project)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1alpha1.Repository)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
|
||||
r1 = returnFunc(ctx, repoURL, project)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// RepoGetter_GetRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepository'
|
||||
type RepoGetter_GetRepository_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetRepository is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - repoURL string
|
||||
// - project string
|
||||
func (_e *RepoGetter_Expecter) GetRepository(ctx interface{}, repoURL interface{}, project interface{}) *RepoGetter_GetRepository_Call {
|
||||
return &RepoGetter_GetRepository_Call{Call: _e.mock.On("GetRepository", ctx, repoURL, project)}
|
||||
}
|
||||
|
||||
func (_c *RepoGetter_GetRepository_Call) Run(run func(ctx context.Context, repoURL string, project string)) *RepoGetter_GetRepository_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 string
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(string)
|
||||
}
|
||||
var arg2 string
|
||||
if args[2] != nil {
|
||||
arg2 = args[2].(string)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
arg2,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *RepoGetter_GetRepository_Call) Return(repository *v1alpha1.Repository, err error) *RepoGetter_GetRepository_Call {
|
||||
_c.Call.Return(repository, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *RepoGetter_GetRepository_Call) RunAndReturn(run func(ctx context.Context, repoURL string, project string) (*v1alpha1.Repository, error)) *RepoGetter_GetRepository_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -262,7 +263,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alp
|
||||
// resources which in this case applies the live values in the configured
|
||||
// ignore differences fields.
|
||||
if syncOp.SyncOptions.HasOption("RespectIgnoreDifferences=true") {
|
||||
patchedTargets, err := normalizeTargetResources(compareResult)
|
||||
patchedTargets, err := normalizeTargetResources(openAPISchema, compareResult)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed to normalize target resources: %s", err)
|
||||
@@ -434,53 +435,65 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alp
|
||||
// - applies normalization to the target resources based on the live resources
|
||||
// - copies ignored fields from the matching live resources: apply normalizer to the live resource,
|
||||
// calculates the patch performed by normalizer and applies the patch to the target resource
|
||||
func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructured, error) {
|
||||
// normalize live and target resources
|
||||
func normalizeTargetResources(openAPISchema openapi.Resources, cr *comparisonResult) ([]*unstructured.Unstructured, error) {
|
||||
// Normalize live and target resources (cleaning or aligning them)
|
||||
normalized, err := diff.Normalize(cr.reconciliationResult.Live, cr.reconciliationResult.Target, cr.diffConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
patchedTargets := []*unstructured.Unstructured{}
|
||||
|
||||
for idx, live := range cr.reconciliationResult.Live {
|
||||
normalizedTarget := normalized.Targets[idx]
|
||||
if normalizedTarget == nil {
|
||||
patchedTargets = append(patchedTargets, nil)
|
||||
continue
|
||||
}
|
||||
gvk := normalizedTarget.GroupVersionKind()
|
||||
|
||||
originalTarget := cr.reconciliationResult.Target[idx]
|
||||
if live == nil {
|
||||
// No live resource, just use target
|
||||
patchedTargets = append(patchedTargets, originalTarget)
|
||||
continue
|
||||
}
|
||||
|
||||
var lookupPatchMeta *strategicpatch.PatchMetaFromStruct
|
||||
versionedObject, err := scheme.Scheme.New(normalizedTarget.GroupVersionKind())
|
||||
if err == nil {
|
||||
meta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
|
||||
if err != nil {
|
||||
var (
|
||||
lookupPatchMeta strategicpatch.LookupPatchMeta
|
||||
versionedObject any
|
||||
)
|
||||
|
||||
// Load patch meta struct or OpenAPI schema for CRDs
|
||||
if versionedObject, err = scheme.Scheme.New(gvk); err == nil {
|
||||
if lookupPatchMeta, err = strategicpatch.NewPatchMetaFromStruct(versionedObject); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lookupPatchMeta = &meta
|
||||
} else if crdSchema := openAPISchema.LookupResource(gvk); crdSchema != nil {
|
||||
lookupPatchMeta = strategicpatch.NewPatchMetaFromOpenAPI(crdSchema)
|
||||
}
|
||||
|
||||
// Calculate live patch
|
||||
livePatch, err := getMergePatch(normalized.Lives[idx], live, lookupPatchMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
normalizedTarget, err = applyMergePatch(normalizedTarget, livePatch, versionedObject)
|
||||
// Apply the patch to the normalized target
|
||||
// This ensures ignored fields in live are restored into the target before syncing
|
||||
normalizedTarget, err = applyMergePatch(normalizedTarget, livePatch, versionedObject, lookupPatchMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
patchedTargets = append(patchedTargets, normalizedTarget)
|
||||
}
|
||||
|
||||
return patchedTargets, nil
|
||||
}
|
||||
|
||||
// getMergePatch calculates and returns the patch between the original and the
|
||||
// modified unstructures.
|
||||
func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMeta *strategicpatch.PatchMetaFromStruct) ([]byte, error) {
|
||||
func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMeta strategicpatch.LookupPatchMeta) ([]byte, error) {
|
||||
originalJSON, err := original.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -496,18 +509,35 @@ func getMergePatch(original, modified *unstructured.Unstructured, lookupPatchMet
|
||||
return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// applyMergePatch will apply the given patch in the obj and return the patched
|
||||
// unstructure.
|
||||
func applyMergePatch(obj *unstructured.Unstructured, patch []byte, versionedObject any) (*unstructured.Unstructured, error) {
|
||||
// applyMergePatch will apply the given patch in the obj and return the patched unstructure.
|
||||
func applyMergePatch(obj *unstructured.Unstructured, patch []byte, versionedObject any, meta strategicpatch.LookupPatchMeta) (*unstructured.Unstructured, error) {
|
||||
originalJSON, err := obj.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var patchedJSON []byte
|
||||
if versionedObject == nil {
|
||||
patchedJSON, err = jsonpatch.MergePatch(originalJSON, patch)
|
||||
} else {
|
||||
switch {
|
||||
case versionedObject != nil:
|
||||
patchedJSON, err = strategicpatch.StrategicMergePatch(originalJSON, patch, versionedObject)
|
||||
case meta != nil:
|
||||
var originalMap, patchMap map[string]any
|
||||
if err := json.Unmarshal(originalJSON, &originalMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.Unmarshal(patch, &patchMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
patchedMap, err := strategicpatch.StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, meta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
patchedJSON, err = json.Marshal(patchedMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
patchedJSON, err = jsonpatch.MergePatch(originalJSON, patch)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -1,9 +1,17 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
openapi_v2 "github.com/google/gnostic-models/openapiv2"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kubectl/pkg/util/openapi"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/sync"
|
||||
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -23,6 +31,29 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
|
||||
)
|
||||
|
||||
type fakeDiscovery struct {
|
||||
schema *openapi_v2.Document
|
||||
}
|
||||
|
||||
func (f *fakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) {
|
||||
return f.schema, nil
|
||||
}
|
||||
|
||||
func loadCRDSchema(t *testing.T, path string) *openapi_v2.Document {
|
||||
t.Helper()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
require.NoError(t, err)
|
||||
|
||||
jsonData, err := yaml.YAMLToJSON(data)
|
||||
require.NoError(t, err)
|
||||
|
||||
doc, err := openapi_v2.ParseDocument(jsonData)
|
||||
require.NoError(t, err)
|
||||
|
||||
return doc
|
||||
}
|
||||
|
||||
func TestPersistRevisionHistory(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = nil
|
||||
@@ -385,7 +416,7 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
f := setup(t, ignores)
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -398,7 +429,7 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
f := setup(t, []v1alpha1.ResourceIgnoreDifferences{})
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -418,7 +449,7 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
unstructured.RemoveNestedField(live.Object, "metadata", "annotations", "iksm-version")
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -443,7 +474,7 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
f := setup(t, ignores)
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -458,7 +489,6 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
assert.Equal(t, int64(4), replicas)
|
||||
})
|
||||
t.Run("will keep new array entries not found in live state if not ignored", func(t *testing.T) {
|
||||
t.Skip("limitation in the current implementation")
|
||||
// given
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
@@ -472,7 +502,7 @@ func TestNormalizeTargetResources(t *testing.T) {
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -509,6 +539,11 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("will properly ignore nested fields within arrays", func(t *testing.T) {
|
||||
doc := loadCRDSchema(t, "testdata/schemas/httpproxy_openapi_v2.yaml")
|
||||
disco := &fakeDiscovery{schema: doc}
|
||||
oapiGetter := openapi.NewOpenAPIGetter(disco)
|
||||
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
|
||||
require.NoError(t, err)
|
||||
// given
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
@@ -522,8 +557,11 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
gvk := schema.GroupVersionKind{Group: "projectcontour.io", Version: "v1", Kind: "HTTPProxy"}
|
||||
fmt.Printf("LookupResource result: %+v\n", oapiResources.LookupResource(gvk))
|
||||
|
||||
// when
|
||||
patchedTargets, err := normalizeTargetResources(f.comparisonResult)
|
||||
patchedTargets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -562,7 +600,7 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -614,7 +652,7 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
// when
|
||||
targets, err := normalizeTargetResources(f.comparisonResult)
|
||||
targets, err := normalizeTargetResources(nil, f.comparisonResult)
|
||||
|
||||
// then
|
||||
require.NoError(t, err)
|
||||
@@ -668,6 +706,175 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
assert.Equal(t, "EV", env0["name"])
|
||||
assert.Equal(t, "here", env0["value"])
|
||||
})
|
||||
|
||||
t.Run("patches ignored differences in individual array elements of HTTPProxy CRD", func(t *testing.T) {
|
||||
doc := loadCRDSchema(t, "testdata/schemas/httpproxy_openapi_v2.yaml")
|
||||
disco := &fakeDiscovery{schema: doc}
|
||||
oapiGetter := openapi.NewOpenAPIGetter(disco)
|
||||
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
|
||||
require.NoError(t, err)
|
||||
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "projectcontour.io",
|
||||
Kind: "HTTPProxy",
|
||||
JQPathExpressions: []string{".spec.routes[].rateLimitPolicy.global.descriptors[].entries[]"},
|
||||
},
|
||||
}
|
||||
|
||||
f := setupHTTPProxy(t, ignores)
|
||||
|
||||
target := test.YamlToUnstructured(testdata.TargetHTTPProxy)
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
live := test.YamlToUnstructured(testdata.LiveHTTPProxy)
|
||||
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
|
||||
|
||||
patchedTargets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, patchedTargets, 1)
|
||||
patched := patchedTargets[0]
|
||||
|
||||
// verify descriptors array in patched target
|
||||
descriptors := dig(patched.Object, "spec", "routes", 0, "rateLimitPolicy", "global", "descriptors").([]any)
|
||||
require.Len(t, descriptors, 1) // Only the descriptors with ignored entries should remain
|
||||
|
||||
// verify individual entries array inside the descriptor
|
||||
entriesArr := dig(patched.Object, "spec", "routes", 0, "rateLimitPolicy", "global", "descriptors", 0, "entries").([]any)
|
||||
require.Len(t, entriesArr, 1) // Only the ignored entry should be patched
|
||||
|
||||
// verify the content of the entry is preserved correctly
|
||||
entry := entriesArr[0].(map[string]any)
|
||||
requestHeader := entry["requestHeader"].(map[string]any)
|
||||
assert.Equal(t, "sample-header", requestHeader["headerName"])
|
||||
assert.Equal(t, "sample-key", requestHeader["descriptorKey"])
|
||||
})
|
||||
}
|
||||
|
||||
func TestNormalizeTargetResourcesCRDs(t *testing.T) {
|
||||
type fixture struct {
|
||||
comparisonResult *comparisonResult
|
||||
}
|
||||
setupHTTPProxy := func(t *testing.T, ignores []v1alpha1.ResourceIgnoreDifferences) *fixture {
|
||||
t.Helper()
|
||||
dc, err := diff.NewDiffConfigBuilder().
|
||||
WithDiffSettings(ignores, nil, true, normalizers.IgnoreNormalizerOpts{}).
|
||||
WithNoCache().
|
||||
Build()
|
||||
require.NoError(t, err)
|
||||
live := test.YamlToUnstructured(testdata.SimpleAppLiveYaml)
|
||||
target := test.YamlToUnstructured(testdata.SimpleAppTargetYaml)
|
||||
return &fixture{
|
||||
&comparisonResult{
|
||||
reconciliationResult: sync.ReconciliationResult{
|
||||
Live: []*unstructured.Unstructured{live},
|
||||
Target: []*unstructured.Unstructured{target},
|
||||
},
|
||||
diffConfig: dc,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("sample-app", func(t *testing.T) {
|
||||
doc := loadCRDSchema(t, "testdata/schemas/simple-app.yaml")
|
||||
disco := &fakeDiscovery{schema: doc}
|
||||
oapiGetter := openapi.NewOpenAPIGetter(disco)
|
||||
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
|
||||
require.NoError(t, err)
|
||||
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "example.com",
|
||||
Kind: "SimpleApp",
|
||||
JQPathExpressions: []string{".spec.servers[1].enabled", ".spec.servers[0].port"},
|
||||
},
|
||||
}
|
||||
|
||||
f := setupHTTPProxy(t, ignores)
|
||||
|
||||
target := test.YamlToUnstructured(testdata.SimpleAppTargetYaml)
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
live := test.YamlToUnstructured(testdata.SimpleAppLiveYaml)
|
||||
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
|
||||
|
||||
patchedTargets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, patchedTargets, 1)
|
||||
|
||||
patched := patchedTargets[0]
|
||||
require.NotNil(t, patched)
|
||||
|
||||
// 'spec.servers' array has length 2
|
||||
servers := dig(patched.Object, "spec", "servers").([]any)
|
||||
require.Len(t, servers, 2)
|
||||
|
||||
// first server's 'name' is 'server1'
|
||||
name1 := dig(patched.Object, "spec", "servers", 0, "name").(string)
|
||||
assert.Equal(t, "server1", name1)
|
||||
|
||||
assert.Equal(t, int64(8081), dig(patched.Object, "spec", "servers", 0, "port").(int64))
|
||||
assert.Equal(t, int64(9090), dig(patched.Object, "spec", "servers", 1, "port").(int64))
|
||||
|
||||
// first server's 'enabled' should be true
|
||||
enabled1 := dig(patched.Object, "spec", "servers", 0, "enabled").(bool)
|
||||
assert.True(t, enabled1)
|
||||
|
||||
// second server's 'name' should be 'server2'
|
||||
name2 := dig(patched.Object, "spec", "servers", 1, "name").(string)
|
||||
assert.Equal(t, "server2", name2)
|
||||
|
||||
// second server's 'enabled' should be true (respected from live due to ignoreDifferences)
|
||||
enabled2 := dig(patched.Object, "spec", "servers", 1, "enabled").(bool)
|
||||
assert.True(t, enabled2)
|
||||
})
|
||||
t.Run("rollout-obj", func(t *testing.T) {
|
||||
// Load Rollout CRD schema like SimpleApp
|
||||
doc := loadCRDSchema(t, "testdata/schemas/rollout-schema.yaml")
|
||||
disco := &fakeDiscovery{schema: doc}
|
||||
oapiGetter := openapi.NewOpenAPIGetter(disco)
|
||||
oapiResources, err := openapi.NewOpenAPIParser(oapiGetter).Parse()
|
||||
require.NoError(t, err)
|
||||
|
||||
ignores := []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "argoproj.io",
|
||||
Kind: "Rollout",
|
||||
JQPathExpressions: []string{`.spec.template.spec.containers[] | select(.name == "init") | .image`},
|
||||
},
|
||||
}
|
||||
|
||||
f := setupHTTPProxy(t, ignores)
|
||||
|
||||
live := test.YamlToUnstructured(testdata.LiveRolloutYaml)
|
||||
target := test.YamlToUnstructured(testdata.TargetRolloutYaml)
|
||||
f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live}
|
||||
f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target}
|
||||
|
||||
targets, err := normalizeTargetResources(oapiResources, f.comparisonResult)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, targets, 1)
|
||||
|
||||
patched := targets[0]
|
||||
require.NotNil(t, patched)
|
||||
|
||||
containers := dig(patched.Object, "spec", "template", "spec", "containers").([]any)
|
||||
require.Len(t, containers, 2)
|
||||
|
||||
initContainer := containers[0].(map[string]any)
|
||||
mainContainer := containers[1].(map[string]any)
|
||||
|
||||
// Assert init container image is preserved (ignoreDifferences works)
|
||||
initImage := dig(initContainer, "image").(string)
|
||||
assert.Equal(t, "init-container:v1", initImage)
|
||||
|
||||
// Assert main container fields as expected
|
||||
mainName := dig(mainContainer, "name").(string)
|
||||
assert.Equal(t, "main", mainName)
|
||||
|
||||
mainImage := dig(mainContainer, "image").(string)
|
||||
assert.Equal(t, "main-container:v1", mainImage)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
12
controller/testdata/data.go
vendored
12
controller/testdata/data.go
vendored
@@ -32,4 +32,16 @@ var (
|
||||
|
||||
//go:embed additional-image-replicas-deployment.yaml
|
||||
AdditionalImageReplicaDeploymentYaml string
|
||||
|
||||
//go:embed simple-app-live.yaml
|
||||
SimpleAppLiveYaml string
|
||||
|
||||
//go:embed simple-app-target.yaml
|
||||
SimpleAppTargetYaml string
|
||||
|
||||
//go:embed target-rollout.yaml
|
||||
TargetRolloutYaml string
|
||||
|
||||
//go:embed live-rollout.yaml
|
||||
LiveRolloutYaml string
|
||||
)
|
||||
|
||||
25
controller/testdata/live-rollout.yaml
vendored
Normal file
25
controller/testdata/live-rollout.yaml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Rollout
|
||||
metadata:
|
||||
name: rollout-sample
|
||||
spec:
|
||||
replicas: 2
|
||||
strategy:
|
||||
canary:
|
||||
steps:
|
||||
- setWeight: 20
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rollout-sample
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rollout-sample
|
||||
spec:
|
||||
containers:
|
||||
- name: init
|
||||
image: init-container:v1
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 10
|
||||
- name: main
|
||||
image: main-container:v1
|
||||
62
controller/testdata/schemas/httpproxy_openapi_v2.yaml
vendored
Normal file
62
controller/testdata/schemas/httpproxy_openapi_v2.yaml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
swagger: "2.0"
|
||||
info:
|
||||
title: HTTPProxy
|
||||
version: "v1"
|
||||
paths: {}
|
||||
definitions:
|
||||
io.projectcontour.v1.HTTPProxy:
|
||||
type: object
|
||||
x-kubernetes-group-version-kind:
|
||||
- group: projectcontour.io
|
||||
version: v1
|
||||
kind: HTTPProxy
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
routes:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
rateLimitPolicy:
|
||||
type: object
|
||||
properties:
|
||||
global:
|
||||
type: object
|
||||
properties:
|
||||
descriptors:
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- entries
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
entries:
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- headerName
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
requestHeader:
|
||||
type: object
|
||||
properties:
|
||||
descriptorKey:
|
||||
type: string
|
||||
headerName:
|
||||
type: string
|
||||
requestHeaderValueMatch:
|
||||
type: object
|
||||
properties:
|
||||
headers:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
contains:
|
||||
type: string
|
||||
value:
|
||||
type: string
|
||||
67
controller/testdata/schemas/rollout-schema.yaml
vendored
Normal file
67
controller/testdata/schemas/rollout-schema.yaml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
swagger: "2.0"
|
||||
info:
|
||||
title: Rollout
|
||||
version: "v1alpha1"
|
||||
paths: {}
|
||||
definitions:
|
||||
argoproj.io.v1alpha1.Rollout:
|
||||
type: object
|
||||
x-kubernetes-group-version-kind:
|
||||
- group: argoproj.io
|
||||
version: v1alpha1
|
||||
kind: Rollout
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
replicas:
|
||||
type: integer
|
||||
strategy:
|
||||
type: object
|
||||
properties:
|
||||
canary:
|
||||
type: object
|
||||
properties:
|
||||
steps:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
setWeight:
|
||||
type: integer
|
||||
selector:
|
||||
type: object
|
||||
properties:
|
||||
matchLabels:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
template:
|
||||
type: object
|
||||
properties:
|
||||
metadata:
|
||||
type: object
|
||||
properties:
|
||||
labels:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
containers:
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
image:
|
||||
type: string
|
||||
livenessProbe:
|
||||
type: object
|
||||
properties:
|
||||
initialDelaySeconds:
|
||||
type: integer
|
||||
29
controller/testdata/schemas/simple-app.yaml
vendored
Normal file
29
controller/testdata/schemas/simple-app.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
swagger: "2.0"
|
||||
info:
|
||||
title: SimpleApp
|
||||
version: "v1"
|
||||
paths: {}
|
||||
definitions:
|
||||
example.com.v1.SimpleApp:
|
||||
type: object
|
||||
x-kubernetes-group-version-kind:
|
||||
- group: example.com
|
||||
version: v1
|
||||
kind: SimpleApp
|
||||
properties:
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
servers:
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
port:
|
||||
type: integer
|
||||
enabled:
|
||||
type: boolean
|
||||
12
controller/testdata/simple-app-live.yaml
vendored
Normal file
12
controller/testdata/simple-app-live.yaml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: example.com/v1
|
||||
kind: SimpleApp
|
||||
metadata:
|
||||
name: simpleapp-sample
|
||||
spec:
|
||||
servers:
|
||||
- name: server1
|
||||
port: 8081 # port changed in live from 8080
|
||||
enabled: true
|
||||
- name: server2
|
||||
port: 9090
|
||||
enabled: true # enabled changed in live from false
|
||||
12
controller/testdata/simple-app-target.yaml
vendored
Normal file
12
controller/testdata/simple-app-target.yaml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: example.com/v1
|
||||
kind: SimpleApp
|
||||
metadata:
|
||||
name: simpleapp-sample
|
||||
spec:
|
||||
servers:
|
||||
- name: server1
|
||||
port: 8080
|
||||
enabled: true
|
||||
- name: server2
|
||||
port: 9090
|
||||
enabled: false
|
||||
25
controller/testdata/target-rollout.yaml
vendored
Normal file
25
controller/testdata/target-rollout.yaml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Rollout
|
||||
metadata:
|
||||
name: rollout-sample
|
||||
spec:
|
||||
replicas: 2
|
||||
strategy:
|
||||
canary:
|
||||
steps:
|
||||
- setWeight: 20
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rollout-sample
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rollout-sample
|
||||
spec:
|
||||
containers:
|
||||
- name: init
|
||||
image: init-container:v1
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 15
|
||||
- name: main
|
||||
image: main-container:v1
|
||||
@@ -18,11 +18,9 @@ These are the upcoming releases dates:
|
||||
| v2.13 | Monday, Sep. 16, 2024 | Monday, Nov. 4, 2024 | [Regina Voloshin](https://github.com/reggie-k) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/19513) |
|
||||
| v2.14 | Monday, Dec. 16, 2024 | Monday, Feb. 3, 2025 | [Ryan Umstead](https://github.com/rumstead) | [Pavel Kostohrys](https://github.com/pasha-codefresh) | [checklist](https://github.com/argoproj/argo-cd/issues/20869) |
|
||||
| v3.0 | Monday, Mar. 17, 2025 | Tuesday, May 6, 2025 | [Regina Voloshin](https://github.com/reggie-k) | | [checklist](https://github.com/argoproj/argo-cd/issues/21735) |
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](https://github.com/argoproj/argo-cd/issues/23347) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | [Michael Crenshaw](https://github.com/crenshaw-dev) | [checklist](https://github.com/argoproj/argo-cd/issues/24539) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | [Peter Jiang](https://github.com/pjiang-dev) | [Regina Voloshin](https://github.com/reggie-k) | [checklist](https://github.com/argoproj/argo-cd/issues/25211) |
|
||||
| v3.4 | Monday, Mar. 16, 2026 | Monday, May. 4, 2026 | | |
|
||||
| v3.5 | Monday, Jun. 15, 2026 | Monday, Aug. 3, 2026 | | |
|
||||
| v3.1 | Monday, Jun. 16, 2025 | Monday, Aug. 4, 2025 | [Christian Hernandez](https://github.com/christianh814) | [Alexandre Gaudreault](https://github.com/agaudreault) | [checklist](#) |
|
||||
| v3.2 | Monday, Sep. 15, 2025 | Monday, Nov. 3, 2025 | [Nitish Kumar](https://github.com/nitishfy) | | [checklist](#) |
|
||||
| v3.3 | Monday, Dec. 15, 2025 | Monday, Feb. 2, 2026 | | |
|
||||
|
||||
Actual release dates might differ from the plan by a few days.
|
||||
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
# Progressive Syncs
|
||||
|
||||
!!! warning "Alpha Feature (Since v2.6.0)"
|
||||
This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
|
||||
feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications
|
||||
|
||||
This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
|
||||
feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications
|
||||
owned by an ApplicationSet resource. It may be removed in future releases or modified in backwards-incompatible ways.
|
||||
|
||||
## Use Cases
|
||||
|
||||
The Progressive Syncs feature set is intended to be light and flexible. The feature only interacts with the health of managed Applications. It is not intended to support direct integrations with other Rollout controllers (such as the native ReplicaSet controller or Argo Rollouts).
|
||||
|
||||
* Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage.
|
||||
* Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported.
|
||||
* [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
|
||||
- Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage.
|
||||
- Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported.
|
||||
- [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
|
||||
|
||||
## Enabling Progressive Syncs
|
||||
|
||||
As an experimental feature, progressive syncs must be explicitly enabled, in one of these ways.
|
||||
|
||||
1. Pass `--enable-progressive-syncs` to the ApplicationSet controller args.
|
||||
@@ -23,17 +26,18 @@ As an experimental feature, progressive syncs must be explicitly enabled, in one
|
||||
|
||||
ApplicationSet strategies control both how applications are created (or updated) and deleted. These operations are configured using two separate fields:
|
||||
|
||||
* **Creation Strategy** (`type` field): Controls application creation and updates
|
||||
* **Deletion Strategy** (`deletionOrder` field): Controls application deletion order
|
||||
- **Creation Strategy** (`type` field): Controls application creation and updates
|
||||
- **Deletion Strategy** (`deletionOrder` field): Controls application deletion order
|
||||
|
||||
### Creation Strategies
|
||||
|
||||
The `type` field controls how applications are created and updated. Available values:
|
||||
|
||||
* **AllAtOnce** (default)
|
||||
* **RollingSync**
|
||||
- **AllAtOnce** (default)
|
||||
- **RollingSync**
|
||||
|
||||
#### AllAtOnce
|
||||
|
||||
This default Application update behavior is unchanged from the original ApplicationSet implementation.
|
||||
|
||||
All Applications managed by the ApplicationSet resource are updated simultaneously when the ApplicationSet is updated.
|
||||
@@ -41,25 +45,25 @@ All Applications managed by the ApplicationSet resource are updated simultaneous
|
||||
```yaml
|
||||
spec:
|
||||
strategy:
|
||||
type: AllAtOnce # explicit, but this is the default
|
||||
type: AllAtOnce # explicit, but this is the default
|
||||
```
|
||||
|
||||
#### RollingSync
|
||||
|
||||
This update strategy allows you to group Applications by labels present on the generated Application resources.
|
||||
When the ApplicationSet changes, the changes will be applied to each group of Application resources sequentially.
|
||||
|
||||
* Application groups are selected using their labels and `matchExpressions`.
|
||||
* All `matchExpressions` must be true for an Application to be selected (multiple expressions match with AND behavior).
|
||||
* The `In` and `NotIn` operators must match at least one value to be considered true (OR behavior).
|
||||
* The `NotIn` operator has priority in the event that both a `NotIn` and `In` operator produce a match.
|
||||
* All Applications in each group must become Healthy before the ApplicationSet controller will proceed to update the next group of Applications.
|
||||
* The number of simultaneous Application updates in a group will not exceed its `maxUpdate` parameter (default is 100%, unbounded).
|
||||
* RollingSync will capture external changes outside the ApplicationSet resource, since it relies on watching the OutOfSync status of the managed Applications.
|
||||
* RollingSync will force all generated Applications to have autosync disabled. Warnings are printed in the applicationset-controller logs for any Application specs with an automated syncPolicy enabled.
|
||||
* Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI.
|
||||
* When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings.
|
||||
* If an Application is considered "Pending" for `applicationsetcontroller.default.application.progressing.timeout` seconds, the Application is automatically moved to Healthy status (default 300).
|
||||
* If an Application is not selected in any step, it will be excluded from the rolling sync and needs to be manually synced through the CLI or UI.
|
||||
- Application groups are selected using their labels and `matchExpressions`.
|
||||
- All `matchExpressions` must be true for an Application to be selected (multiple expressions match with AND behavior).
|
||||
- The `In` and `NotIn` operators must match at least one value to be considered true (OR behavior).
|
||||
- The `NotIn` operator has priority in the event that both a `NotIn` and `In` operator produce a match.
|
||||
- All Applications in each group must become Healthy before the ApplicationSet controller will proceed to update the next group of Applications.
|
||||
- The number of simultaneous Application updates in a group will not exceed its `maxUpdate` parameter (default is 100%, unbounded).
|
||||
- RollingSync will capture external changes outside the ApplicationSet resource, since it relies on watching the OutOfSync status of the managed Applications.
|
||||
- RollingSync will force all generated Applications to have autosync disabled. Warnings are printed in the applicationset-controller logs for any Application specs with an automated syncPolicy enabled.
|
||||
- Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI.
|
||||
- When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings.
|
||||
- If an Application is not selected in any step, it will be excluded from the rolling sync and needs to be manually synced through the CLI or UI.
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
@@ -84,25 +88,28 @@ spec:
|
||||
|
||||
The `deletionOrder` field controls the order in which applications are deleted when they are removed from the ApplicationSet. Available values:
|
||||
|
||||
* **AllAtOnce** (default)
|
||||
* **Reverse**
|
||||
- **AllAtOnce** (default)
|
||||
- **Reverse**
|
||||
|
||||
#### AllAtOnce Deletion
|
||||
|
||||
This is the default behavior where all applications that need to be deleted are removed simultaneously. This works with both `AllAtOnce` and `RollingSync` creation strategies.
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
strategy:
|
||||
type: RollingSync # or AllAtOnce
|
||||
deletionOrder: AllAtOnce # explicit, but this is the default
|
||||
type: RollingSync # or AllAtOnce
|
||||
deletionOrder: AllAtOnce # explicit, but this is the default
|
||||
```
|
||||
|
||||
#### Reverse Deletion
|
||||
|
||||
When using `deletionOrder: Reverse` with RollingSync strategy, applications are deleted in reverse order of the steps defined in `rollingSync.steps`. This ensures that applications deployed in later steps are deleted before applications deployed in earlier steps.
|
||||
This strategy is particularly useful when you need to tear down dependent services in the particular sequence.
|
||||
|
||||
**Requirements for Reverse deletion:**
|
||||
- Must be used with `type: RollingSync`
|
||||
|
||||
- Must be used with `type: RollingSync`
|
||||
- Requires `rollingSync.steps` to be defined
|
||||
- Applications are deleted in reverse order of step sequence
|
||||
|
||||
@@ -119,28 +126,30 @@ spec:
|
||||
- key: envLabel
|
||||
operator: In
|
||||
values:
|
||||
- env-dev # Step 1: Created first, deleted last
|
||||
- env-dev # Step 1: Created first, deleted last
|
||||
- matchExpressions:
|
||||
- key: envLabel
|
||||
- key: envLabel
|
||||
operator: In
|
||||
values:
|
||||
- env-prod # Step 2: Created second, deleted first
|
||||
- env-prod # Step 2: Created second, deleted first
|
||||
```
|
||||
|
||||
In this example, when applications are deleted:
|
||||
|
||||
1. `env-prod` applications (Step 2) are deleted first
|
||||
2. `env-dev` applications (Step 1) are deleted second
|
||||
|
||||
This deletion order is useful for scenarios where you need to tear down dependent services in the correct sequence, such as deleting frontend services before backend dependencies.
|
||||
|
||||
#### Example
|
||||
|
||||
The following example illustrates how to stage a progressive sync over Applications with explicitly configured environment labels.
|
||||
|
||||
Once a change is pushed, the following will happen in order.
|
||||
|
||||
* All `env-dev` Applications will be updated simultaneously.
|
||||
* The rollout will wait for all `env-qa` Applications to be manually synced via the `argocd` CLI or by clicking the Sync button in the UI.
|
||||
* 10% of all `env-prod` Applications will be updated at a time until all `env-prod` Applications have been updated.
|
||||
- All `env-dev` Applications will be updated simultaneously.
|
||||
- The rollout will wait for all `env-qa` Applications to be manually synced via the `argocd` CLI or by clicking the Sync button in the UI.
|
||||
- 10% of all `env-prod` Applications will be updated at a time until all `env-prod` Applications have been updated.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
@@ -149,20 +158,20 @@ metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
env: env-dev
|
||||
- cluster: engineering-qa
|
||||
url: https://2.4.6.8
|
||||
env: env-qa
|
||||
- cluster: engineering-prod
|
||||
url: https://9.8.7.6/
|
||||
env: env-prod
|
||||
- list:
|
||||
elements:
|
||||
- cluster: engineering-dev
|
||||
url: https://1.2.3.4
|
||||
env: env-dev
|
||||
- cluster: engineering-qa
|
||||
url: https://2.4.6.8
|
||||
env: env-qa
|
||||
- cluster: engineering-prod
|
||||
url: https://9.8.7.6/
|
||||
env: env-prod
|
||||
strategy:
|
||||
type: RollingSync
|
||||
deletionOrder: Reverse # Applications will be deleted in reverse order of steps
|
||||
deletionOrder: Reverse # Applications will be deleted in reverse order of steps
|
||||
rollingSync:
|
||||
steps:
|
||||
- matchExpressions:
|
||||
@@ -176,15 +185,15 @@ spec:
|
||||
operator: In
|
||||
values:
|
||||
- env-qa
|
||||
maxUpdate: 0 # if 0, no matched applications will be updated
|
||||
maxUpdate: 0 # if 0, no matched applications will be updated
|
||||
- matchExpressions:
|
||||
- key: envLabel
|
||||
operator: In
|
||||
values:
|
||||
- env-prod
|
||||
maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%)
|
||||
maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%)
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
goTemplateOptions: ['missingkey=error']
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.cluster}}-guestbook'
|
||||
|
||||
@@ -3,5 +3,5 @@
|
||||
An example of an argocd-cmd-params-cm.yaml file:
|
||||
|
||||
```yaml
|
||||
{ !docs/operator-manual/argocd-cmd-params-cm.yaml! }
|
||||
```
|
||||
{!docs/operator-manual/argocd-cmd-params-cm.yaml!}
|
||||
```
|
||||
|
||||
@@ -219,8 +219,6 @@ data:
|
||||
reposerver.git.lsremote.parallelism.limit: "0"
|
||||
# Git requests timeout.
|
||||
reposerver.git.request.timeout: "15s"
|
||||
# Enable builtin git configuration options that are required for correct argocd-repo-server operation (default "true")
|
||||
reposerver.enable.builtin.git.config: "true"
|
||||
# Include hidden directories from Git
|
||||
reposerver.include.hidden.directories: "false"
|
||||
|
||||
@@ -294,10 +292,6 @@ data:
|
||||
applicationsetcontroller.global.preserved.labels: "acme.com/label1,acme.com/label2"
|
||||
# Enable GitHub API metrics for generators that use GitHub API
|
||||
applicationsetcontroller.enable.github.api.metrics: "false"
|
||||
# The maximum number of resources stored in the status of an ApplicationSet. This is a safeguard to prevent the status from growing too large.
|
||||
applicationsetcontroller.status.max.resources.count: "5000"
|
||||
# Enables profile endpoint on the internal metrics port
|
||||
applicationsetcontroller.profile.enabled: "false"
|
||||
|
||||
## Argo CD Notifications Controller Properties
|
||||
# Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
|
||||
@@ -121,18 +121,6 @@ spec:
|
||||
...
|
||||
```
|
||||
|
||||
### Deleting child applications
|
||||
|
||||
When working with the App of Apps pattern, you may need to delete individual child applications. Starting in 3.2, Argo CD provides consistent deletion behaviour whether you delete from the Applications List or from the parent application's Resource Tree.
|
||||
|
||||
For detailed information about deletion options and behaviour, including:
|
||||
- Consistent deletion across UI views
|
||||
- Non-cascading (orphan) deletion to preserve managed resources
|
||||
- Child application detection and improved dialog messages
|
||||
- Best practices and example scenarios
|
||||
|
||||
See [Deleting Applications in the UI](../user-guide/app_deletion.md#deleting-applications-in-the-ui).
|
||||
|
||||
### Ignoring differences in child applications
|
||||
|
||||
To allow changes in child apps without triggering an out-of-sync status, or modification for debugging etc, the app of apps pattern works with [diff customization](../user-guide/diffing/). The example below shows how to ignore changes to syncPolicy and other common values.
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
|
||||
# Git Configuration
|
||||
|
||||
## System Configuration
|
||||
|
||||
Argo CD uses the Git installation from its base image (Ubuntu), which
|
||||
includes a standard system configuration file located at
|
||||
`/etc/gitconfig`. This file is minimal, just defining filters
|
||||
necessary for Git LFS functionality.
|
||||
|
||||
You can customize Git's system configuration by mounting a file from a
|
||||
ConfigMap or by creating a custom Argo CD image.
|
||||
|
||||
## Global Configuration
|
||||
|
||||
Argo CD runs Git with the `HOME` environment variable set to
|
||||
`/dev/null`. As a result, global Git configuration is not supported.
|
||||
|
||||
## Built-in Configuration
|
||||
|
||||
The `argocd-repo-server` adds specific configuration parameters to the
|
||||
Git environment to ensure proper Argo CD operation. These built-in
|
||||
settings override any conflicting values from the system Git
|
||||
configuration.
|
||||
|
||||
Currently, the following built-in configuration options are set:
|
||||
|
||||
- `maintenance.autoDetach=false`
|
||||
- `gc.autoDetach=false`
|
||||
|
||||
These settings force Git's repository maintenance tasks to run in the
|
||||
foreground. This prevents Git from running detached background
|
||||
processes that could modify the repository and interfere with
|
||||
subsequent Git invocations from `argocd-repo-server`.
|
||||
|
||||
You can disable these built-in settings by setting the
|
||||
`argocd-cmd-params-cm` value `reposerver.enable.builtin.git.config` to
|
||||
`"false"`. This allows you to experiment with background processing or
|
||||
if you are certain that concurrency issues will not occur in your
|
||||
environment.
|
||||
|
||||
> [!NOTE]
|
||||
> Disabling this is not recommended and is not supported!
|
||||
@@ -223,10 +223,10 @@ The following resources have Go-based health checks:
|
||||
|
||||
## Health Checks
|
||||
|
||||
An Argo CD App's health is inferred from the health of its immediate child resources (the resources represented in
|
||||
source control). The App health will be the worst health of its immediate child sources. The priority of most to least
|
||||
healthy statuses is: `Healthy`, `Suspended`, `Progressing`, `Missing`, `Degraded`, `Unknown`. So, for example, if an App
|
||||
has a `Missing` resource and a `Degraded` resource, the App's health will be `Missing`.
|
||||
Argo CD App health is inferred from the health of its immediate child resources as represented in the application source.
|
||||
The App health will be the **worst health of its immediate child resources**, based on the following priority (from most to least healthy):
|
||||
**Healthy, Suspended, Progressing, Missing, Degraded, Unknown.**
|
||||
For example, if an App has a Missing resource and a Degraded resource, the App's health will be **Degraded**.
|
||||
|
||||
But the health of a resource is not inherited from child resources - it is calculated using only information about the
|
||||
resource itself. A resource's status field may or may not contain information about the health of a child resource, and
|
||||
|
||||
@@ -398,16 +398,13 @@ Not all HTTP responses are eligible for retries. The following conditions will n
|
||||
|
||||
## CPU/Memory Profiling
|
||||
|
||||
Argo CD optionally exposes a profiling endpoint that can be used to profile the CPU and memory usage of the Argo CD
|
||||
component.
|
||||
The profiling endpoint is available on metrics port of each component. See [metrics](./metrics.md) for more information
|
||||
about the port.
|
||||
For security reasons, the profiling endpoint is disabled by default. The endpoint can be enabled by setting the
|
||||
`server.profile.enabled`, `applicationsetcontroller.profile.enabled`, or `controller.profile.enabled` key
|
||||
of [argocd-cmd-params-cm](argocd-cmd-params-cm.yaml) ConfigMap to `true`.
|
||||
Once the endpoint is enabled, you can use go profile tool to collect the CPU and memory profiles. Example:
|
||||
Argo CD optionally exposes a profiling endpoint that can be used to profile the CPU and memory usage of the Argo CD component.
|
||||
The profiling endpoint is available on metrics port of each component. See [metrics](./metrics.md) for more information about the port.
|
||||
For security reasons the profiling endpoint is disabled by default. The endpoint can be enabled by setting the `server.profile.enabled`
|
||||
or `controller.profile.enabled` key of [argocd-cmd-params-cm](argocd-cmd-params-cm.yaml) ConfigMap to `true`.
|
||||
Once the endpoint is enabled you can use go profile tool to collect the CPU and memory profiles. Example:
|
||||
|
||||
```bash
|
||||
$ kubectl port-forward svc/argocd-metrics 8082:8082
|
||||
$ go tool pprof http://localhost:8082/debug/pprof/heap
|
||||
```
|
||||
```
|
||||
|
||||
@@ -35,26 +35,14 @@ metadata:
|
||||
name: argocd-notifications-cm
|
||||
data:
|
||||
trigger.sync-operation-change: |
|
||||
- when: app.status?.operationState.phase in ['Succeeded']
|
||||
- when: app.status.operationState.phase in ['Succeeded']
|
||||
send: [github-commit-status]
|
||||
- when: app.status?.operationState.phase in ['Running']
|
||||
- when: app.status.operationState.phase in ['Running']
|
||||
send: [github-commit-status]
|
||||
- when: app.status?.operationState.phase in ['Error', 'Failed']
|
||||
- when: app.status.operationState.phase in ['Error', 'Failed']
|
||||
send: [app-sync-failed, github-commit-status]
|
||||
```
|
||||
|
||||
|
||||
## Accessing Optional Manifest Sections and Fields
|
||||
|
||||
Note that in the trigger example above, the `?.` (optional chaining) operator is used to access the Application's
|
||||
`status.operationState` section. This section is optional; it is not present when an operation has been initiated but has not yet
|
||||
started by the Application Controller.
|
||||
|
||||
If the `?.` operator were not used, `status.operationState` would resolve to `nil` and the evaluation of the
|
||||
`app.status.operationState.phase` expression would fail. The `app.status?.operationState.phase` expression is equivalent to
|
||||
`app.status.operationState != nil ? app.status.operationState.phase : nil`.
|
||||
|
||||
|
||||
## Avoid Sending Same Notification Too Often
|
||||
|
||||
In some cases, the trigger condition might be "flapping". The example below illustrates the problem.
|
||||
@@ -72,14 +60,14 @@ data:
|
||||
# Optional 'oncePer' property ensures that a notification is sent only once per specified field value
|
||||
# E.g. the following is triggered once per sync revision
|
||||
trigger.on-deployed: |
|
||||
when: app.status?.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'
|
||||
when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'
|
||||
oncePer: app.status.sync.revision
|
||||
send: [app-sync-succeeded]
|
||||
```
|
||||
|
||||
**Mono Repo Usage**
|
||||
|
||||
When one repo is used to sync multiple applications, the `oncePer: app.status.sync.revision` field will trigger a notification for each commit. For mono repos, the better approach will be using `oncePer: app.status?.operationState.syncResult.revision` statement. This way a notification will be sent only for a particular Application's revision.
|
||||
When one repo is used to sync multiple applications, the `oncePer: app.status.sync.revision` field will trigger a notification for each commit. For mono repos, the better approach will be using `oncePer: app.status.operationState.syncResult.revision` statement. This way a notification will be sent only for a particular Application's revision.
|
||||
|
||||
### oncePer
|
||||
|
||||
@@ -134,7 +122,7 @@ Triggers have access to the set of built-in functions.
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
when: time.Now().Sub(time.Parse(app.status?.operationState.startedAt)).Minutes() >= 5
|
||||
when: time.Now().Sub(time.Parse(app.status.operationState.startedAt)).Minutes() >= 5
|
||||
```
|
||||
|
||||
{!docs/operator-manual/notifications/functions.md!}
|
||||
|
||||
@@ -37,7 +37,6 @@ argocd-applicationset-controller [flags]
|
||||
--kubeconfig string Path to a kube config. Only required if out-of-cluster
|
||||
--logformat string Set the logging format. One of: json|text (default "json")
|
||||
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
--max-resources-status-count int Max number of resources stored in appset status.
|
||||
--metrics-addr string The address the metric endpoint binds to. (default ":8080")
|
||||
--metrics-applicationset-labels strings List of Application labels that will be added to the argocd_applicationset_labels metric
|
||||
-n, --namespace string If present, the namespace scope for this CLI request
|
||||
|
||||
@@ -21,7 +21,6 @@ argocd-repo-server [flags]
|
||||
--disable-helm-manifest-max-extracted-size Disable maximum size of helm manifest archives when extracted
|
||||
--disable-oci-manifest-max-extracted-size Disable maximum size of oci manifest archives when extracted
|
||||
--disable-tls Disable TLS on the gRPC endpoint
|
||||
--enable-builtin-git-config Enable builtin git configuration options that are required for correct argocd-repo-server operation. (default true)
|
||||
--helm-manifest-max-extracted-size string Maximum size of helm manifest archives when extracted (default "1G")
|
||||
--helm-registry-max-index-size string Maximum size of registry index file (default "1G")
|
||||
-h, --help help for argocd-repo-server
|
||||
|
||||
@@ -1,5 +1,2 @@
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 3.2 | v1.33, v1.32, v1.31, v1.30 |
|
||||
| 3.1 | v1.33, v1.32, v1.31, v1.30 |
|
||||
| 3.0 | v1.32, v1.31, v1.30, v1.29 |
|
||||
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
|
||||
version.
|
||||
|
||||
@@ -55,16 +55,4 @@ spec:
|
||||
+ protocol: UDP
|
||||
+ - port: 53
|
||||
+ protocol: TCP
|
||||
```
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [route53.aws.crossplane.io/ResourceRecordSet](https://github.com/argoproj/argo-cd/commit/666499f6108124ef7bfa0c6cc616770c6dc4f42c)
|
||||
* [cloudfront.aws.crossplane.io/Distribution](https://github.com/argoproj/argo-cd/commit/21c384f42354ada2b94c18773104527eb27f86bc)
|
||||
* [beat.k8s.elastic.co/Beat](https://github.com/argoproj/argo-cd/commit/5100726fd61617a0001a27233cfe8ac4354bdbed)
|
||||
* [apps.kruise.io/AdvancedCronjob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/BroadcastJob](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/CloneSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/DaemonSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [apps.kruise.io/StatefulSet](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
* [rollouts.kruise.io/Rollout](https://github.com/argoproj/argo-cd/commit/d6da9f2a15fba708d70531c5b3f2797663fb3c08)
|
||||
```
|
||||
@@ -57,24 +57,3 @@ The affected ApplicationSet fields are the following (jq selector syntax):
|
||||
* `.spec.generators[].clusterDecisionResource.labelSelector`
|
||||
* `.spec.generators[].matrix.generators[].selector`
|
||||
* `.spec.generators[].merge.generators[].selector`
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [core.humio.com/HumioAction](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioAlert](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioCluster](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioIngestToken](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioParser](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioRepository](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [core.humio.com/HumioView](https://github.com/argoproj/argo-cd/commit/1cd6fcac4f38edf3cd3b5409fa1b6d4aa4ad2694)
|
||||
* [k8s.mariadb.com/Backup](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/Database](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/Grant](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/MariaDB](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/SqlJob](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [k8s.mariadb.com/User](https://github.com/argoproj/argo-cd/commit/440fbac12b7469fd3ed4a6e1f6ace5cf7eacaf39)
|
||||
* [kafka.strimzi.io/KafkaBridge](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
|
||||
* [kafka.strimzi.io/KafkaConnector](https://github.com/argoproj/argo-cd/commit/f13861740c17be1ab261f986532706cdda638b24)
|
||||
* [keda.sh/ScaledObject](https://github.com/argoproj/argo-cd/commit/9bc9ff9c7a3573742a767c38679cbefb4f07c1c0)
|
||||
* [openfaas.com/Function](https://github.com/argoproj/argo-cd/commit/2a05ae02ab90ae06fefa97ed6b9310590d317783)
|
||||
* [camel.apache.org/Integration](https://github.com/argoproj/argo-cd/commit/1e2f5987d25307581cd56b8fe9d329633e0f704f)
|
||||
@@ -68,41 +68,6 @@ The default extension for log files generated by Argo CD when using the "Downloa
|
||||
- Consistency with standard log file conventions.
|
||||
|
||||
If you have any custom scripts or tools that depend on the `.txt` extension, please update them accordingly.
|
||||
|
||||
## Added proxy to kustomize
|
||||
|
||||
Proxy config set on repository credentials / repository templates is now passed down to the `kustomize build` command.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [controlplane.cluster.x-k8s.io/AWSManagedControlPlane](https://github.com/argoproj/argo-cd/commit/f1105705126153674c79f69b5d9c9647360d16f5)
|
||||
* [policy.open-cluster-management.io/CertificatePolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/ConfigurationPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/OperatorPolicy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [policy.open-cluster-management.io/Policy](https://github.com/argoproj/argo-cd/commit/d2231577c7f667d86bd0aa9505f871ecf1fde2bb)
|
||||
* [PodDisruptionBudget](https://github.com/argoproj/argo-cd/commit/e86258d8a5049260b841abc0ef1fd7f7a4b7cd45)
|
||||
* [cluster.x-k8s.io/MachinePool](https://github.com/argoproj/argo-cd/commit/59e00911304288b4f96889bf669b6ed2aecdf31b)
|
||||
* [lifecycle.keptn.sh/KeptnWorkloadVersion](https://github.com/argoproj/argo-cd/commit/ddc0b0fd3fa7e0b53170582846b20be23c301185)
|
||||
* [numaplane.numaproj.io/ISBServiceRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [numaplane.numaproj.io/NumaflowControllerRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [numaplane.numaproj.io/PipelineRollout](https://github.com/argoproj/argo-cd/commit/d6bc02b1956a375f853e9d5c37d97ee6963154df)
|
||||
* [rds.aws.crossplane.io/DBCluster](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
|
||||
* [rds.aws.crossplane.io/DBInstance](https://github.com/argoproj/argo-cd/commit/f26b76a7aa81637474cfb7992629ea1007124606)
|
||||
* [iam.aws.crossplane.io/Policy](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [iam.aws.crossplane.io/RolePolicyAttachment](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [iam.aws.crossplane.io/Role](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [s3.aws.crossplane.io/Bucket](https://github.com/argoproj/argo-cd/commit/7f338e910f11929d172b39f5c2b395948529f7e8)
|
||||
* [metrics.keptn.sh/KeptnMetric](https://github.com/argoproj/argo-cd/commit/326cc4a06b2cb5ac99797d3f04c2d4c48b8692e2)
|
||||
* [metrics.keptn.sh/Analysis](https://github.com/argoproj/argo-cd/commit/e26c105e527ed262cc5dc838a793841017ba316a)
|
||||
* [numaplane.numaproj.io/MonoVertexRollout](https://github.com/argoproj/argo-cd/commit/32ee00f1f494f69cc84d1881dda70ce514e1f737)
|
||||
* [helm.toolkit.fluxcd.io/HelmRelease](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImagePolicy](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImageRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [image.toolkit.fluxcd.io/ImageUpdateAutomation](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [kustomize.toolkit.fluxcd.io/Kustomization](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [notification.toolkit.fluxcd.io/Receiver](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/Bucket](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/GitRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/HelmChart](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/HelmRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
* [source.toolkit.fluxcd.io/OCIRepository](https://github.com/argoproj/argo-cd/commit/824d0dced73196bf3c148f92a164145cc115c5ea)
|
||||
|
||||
@@ -20,27 +20,3 @@ the [CLI and Application CR](https://argo-cd.readthedocs.io/en/latest/user-guide
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [platform.confluent.io/Connector](https://github.com/argoproj/argo-cd/commit/99efafb55a553a9ab962d56c20dab54ba65b7ae0)
|
||||
* [addons.cluster.x-k8s.io/ClusterResourceSet](https://github.com/argoproj/argo-cd/commit/fdf539dc6a027ef975fde23bf734f880570ccdc3)
|
||||
* [numaflow.numaproj.io/InterStepBufferService](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/MonoVertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/Pipeline](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [numaflow.numaproj.io/Vertex](https://github.com/argoproj/argo-cd/commit/82484ce758aa80334ecf66bfda28b9d5c41a8c30)
|
||||
* [acid.zalan.do/Postgresql](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [grafana.integreatly.org/Grafana](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [grafana.integreatly.org/GrafanaDatasource](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [k8s.keycloak.org/Keycloak](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [solr.apache.org/SolrCloud](https://github.com/argoproj/argo-cd/commit/19d85aa9fbb40dca452f0a0f2f9ab462e02c851d)
|
||||
* [gateway.solo.io/Gateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/MatchableHttpGateway](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/RouteOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/RouteTable](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/VirtualHostOption](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gateway.solo.io/VirtualService](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Proxy](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Settings](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/Upstream](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
* [gloo.solo.io/UpstreamGroup](https://github.com/argoproj/argo-cd/commit/2a199bc7ae70ce8b933cda81f8558916621750d5)
|
||||
@@ -288,9 +288,6 @@ resources.
|
||||
delete it. To avoid this edge case, it is recommended to perform a sync operation on your Applications, even if
|
||||
they are not out of sync, so that orphan resource detection will work as expected on the next sync.
|
||||
|
||||
After upgrading to version 3.0, the Argo CD tracking annotation will only appear on an Application’s resources when
|
||||
either a new Git commit is made or the Application is explicitly synced.
|
||||
|
||||
##### Users who rely on label-based for resources that are not managed by Argo CD
|
||||
Some users rely on label-based tracking to track resources that are not managed by Argo CD. They may set annotations
|
||||
to have Argo CD ignore the resource as extraneous or to disable pruning. If you are using label-based tracking to track
|
||||
@@ -500,8 +497,4 @@ More details for ignored resource updates in the [Diffing customization](../../u
|
||||
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* No new added health checks
|
||||
credentials of project-scoped repositories and clusters.
|
||||
@@ -63,25 +63,3 @@ to the [release notes](https://github.com/kubernetes-sigs/kustomize/releases/tag
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [core.spinkube.dev/SpinApp](https://github.com/argoproj/argo-cd/commit/7d6604404fd3b7d77124f9623a2d7a12cc24a0bb)
|
||||
* [opentelemetry.io/OpenTelemetryCollector](https://github.com/argoproj/argo-cd/commit/65464d8b77941c65499028bb14172fc40e62e38b)
|
||||
* [logstash.k8s.elastic.co/Logstash](https://github.com/argoproj/argo-cd/commit/8f1f5c7234e694a4830744f92e1b0f8d1e3cd43d)
|
||||
* [kyverno.io/Policy](https://github.com/argoproj/argo-cd/commit/e578b85410f748c6c7b4e10ff1a5fdbca09b3328)
|
||||
* [projectcontour.io/HTTPProxy](https://github.com/argoproj/argo-cd/commit/ce4b7a28cc77959fab5b6fedd14b1f9e9a4af4f7)
|
||||
* [grafana.integreatly.org/GrafanaDashboard](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
|
||||
* [grafana.integreatly.org/GrafanaFolder](https://github.com/argoproj/argo-cd/commit/5a3a10479380eb39f1c145babdf94ed1a72d054c)
|
||||
* [postgresql.cnpg.io/Cluster](https://github.com/argoproj/argo-cd/commit/f4edcf7717940e44a141dadb5ca8c5fc11951cb2)
|
||||
* [gateway.networking.k8s.io/GRPCRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [gateway.networking.k8s.io/Gateway](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [gateway.networking.k8s.io/HTTPRoute](https://github.com/argoproj/argo-cd/commit/a2152a1216cdbeaa7bd02d0b2fb225390f96c77a)
|
||||
* [rabbitmq.com/Binding](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Exchange](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Permission](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Policy](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Queue](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Shovel](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/User](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
* [rabbitmq.com/Vhost](https://github.com/argoproj/argo-cd/commit/96039be4e075e5b22781703023bfbbe5586bd081)
|
||||
@@ -86,36 +86,3 @@ If you do not want your CronJob to affect the Application's aggregated Health, y
|
||||
Due to security reasons ([GHSA-786q-9hcg-v9ff](https://github.com/argoproj/argo-cd/security/advisories/GHSA-786q-9hcg-v9ff)),
|
||||
the project API response was sanitized to remove sensitive information. This includes
|
||||
credentials of project-scoped repositories and clusters.
|
||||
|
||||
## ApplicationSet `resources` field of `status` resource is limited to 5000 elements by default
|
||||
|
||||
The `resources` field of the `status` resource of an ApplicationSet is now limited to 5000 elements by default. This is
|
||||
to prevent status bloat and exceeding etcd limits. The limit can be configured by setting the `applicationsetcontroller.status.max.resources.count`
|
||||
field in the `argocd-cmd-params-cm` ConfigMap.
|
||||
|
||||
## Added Healthchecks
|
||||
|
||||
* [datadoghq.com/DatadogMetric](https://github.com/argoproj/argo-cd/commit/5c9a5ef9a65f8e04e729fbae54a9310c0a42f6c2)
|
||||
* [CronJob](https://github.com/argoproj/argo-cd/commit/d3de4435ce86f3f85a4cc58978b2544af2ac4248)
|
||||
* [promoter.argoproj.io/ArgoCDCommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/ChangeTransferPolicy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/CommitStatus](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/PromotionStrategy](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [promoter.argoproj.io/PullRequest](https://github.com/argoproj/argo-cd/commit/36f1a59c09ad4ef384689fa0699ff7eba60f4a20)
|
||||
* [coralogix.com/Alert](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
|
||||
* [coralogix.com/RecordingRuleGroupSet](https://github.com/argoproj/argo-cd/commit/dcf1965c529790855647f036e4e7ea0323fbf812)
|
||||
* [projectcontour.io/ExtensionService](https://github.com/argoproj/argo-cd/commit/4e63bc756394d93c684b6b8e8b3856e0e6b3f199)
|
||||
* [clickhouse-keeper.altinity.com/ClickHouseKeeperInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
|
||||
* [clickhouse.altinity.com/ClickHouseInstallation](https://github.com/argoproj/argo-cd/commit/c447628913da1c0134bbb1d21a9ae366804b4a8e)
|
||||
* [apps.3scale.net/APIManager](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ActiveDoc](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ApplicationAuth](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Application](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Backend](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/CustomPolicyDefinition](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/DeveloperAccount](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/DeveloperUser](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/OpenAPI](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Product](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/ProxyConfigPromote](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
* [capabilities.3scale.net/Tenant](https://github.com/argoproj/argo-cd/commit/d954789d47276354371556ff97e6a7c3840440e0)
|
||||
@@ -5,5 +5,5 @@ mkdocs-material==7.1.8
|
||||
markdown_include==0.8.1
|
||||
pygments==2.19.2
|
||||
jinja2==3.1.6
|
||||
markdown==3.8.2
|
||||
markdown==3.9
|
||||
pymdown-extensions==10.16.1
|
||||
@@ -58,120 +58,3 @@ Argo CD performs [background cascading deletion](https://kubernetes.io/docs/conc
|
||||
|
||||
When you invoke `argocd app delete` with `--cascade`, the finalizer is added automatically.
|
||||
You can set the propagation policy with `--propagation-policy <foreground|background>`.
|
||||
|
||||
## Deleting Applications in the UI
|
||||
|
||||
Argo CD provides a consistent deletion experience across different views in the UI. When deleting applications, you can access the delete functionality from:
|
||||
|
||||
- **Applications List View**: The main applications page showing all applications
|
||||
- **Application Details View - Resource Tree**: When viewing an application's resource tree that contains child applications
|
||||
|
||||
### Consistent Deletion Behaviour
|
||||
|
||||
Starting in Argo CD 3.2, deletion behavior is now **consistent** across all UI views. Whether you delete an application from the Applications List or from the Resource Tree view, the same deletion mechanism and options are used.
|
||||
|
||||
Previously, deleting an application from the Resource Tree treated it as a generic Kubernetes resource, which could lead to unexpected behaviour with non-cascading deletes. Now, Argo CD properly detects Application resources and uses the standard Application deletion API in all contexts.
|
||||
|
||||
### Deleting Child Applications in App of Apps Pattern
|
||||
|
||||
When using the [App of Apps pattern](../operator-manual/cluster-bootstrapping.md), parent applications can contain child applications as resources. Argo CD automatically detects child applications and provides improved dialog messages to help you understand what you're deleting.
|
||||
|
||||
#### Child Application Detection
|
||||
|
||||
Argo CD identifies a child application by checking for the `app.kubernetes.io/part-of` label. If this label is present and has a non-empty value, the application is considered a child application.
|
||||
|
||||
#### Delete Dialog Differences
|
||||
|
||||
**When deleting a child application:**
|
||||
|
||||
- Dialog title: "Delete child application"
|
||||
- Confirmation prompt references "child application" to make it clear you're deleting a managed application
|
||||
- Additional warning note appears when deleting from the Resource Tree
|
||||
|
||||
**When deleting a regular application:**
|
||||
|
||||
- Dialog title: "Delete application"
|
||||
- Standard confirmation prompt
|
||||
|
||||
**When deleting from the Resource Tree:**
|
||||
|
||||
An additional informational note appears:
|
||||
|
||||
> ⚠️ **Note:** You are about to delete an Application from the resource tree. This uses the same deletion behaviour as the Applications list page.
|
||||
|
||||
This note clarifies that the deletion will use the proper Application deletion API, not generic Kubernetes resource deletion.
|
||||
|
||||
### Deletion Options (Propagation Policies)
|
||||
|
||||
When deleting an application through the UI, you can choose from three propagation policies:
|
||||
|
||||
#### 1. Foreground (Default)
|
||||
|
||||
- Deletes the application and all its managed resources
|
||||
- Waits for all managed resources to be deleted before the Application is removed
|
||||
- **Use case**: When you want to ensure all resources are cleaned up before the Application disappears
|
||||
|
||||
#### 2. Background
|
||||
|
||||
- Deletes the application and all its managed resources
|
||||
- The Application is removed immediately, and resources are deleted in the background
|
||||
- **Use case**: When you want faster Application deletion and don't need to wait for resource cleanup
|
||||
|
||||
#### 3. Non-Cascading (Orphan)
|
||||
|
||||
- Deletes **only** the Application resource
|
||||
- All managed resources (Deployments, Services, ConfigMaps, etc.) are **preserved** in the cluster
|
||||
- The finalizer is removed automatically before deletion
|
||||
- **Use case**: When you want to stop managing resources through Argo CD but keep them running
|
||||
|
||||
> [!WARNING]
|
||||
> **Important for Non-Cascading Deletes**
|
||||
>
|
||||
> When you select **Non-Cascading**, Argo CD will:
|
||||
> - Remove the `resources-finalizer.argocd.argoproj.io` finalizer from the Application
|
||||
> - Delete only the Application resource
|
||||
> - Leave all managed resources (Pods, Services, Deployments, etc.) running in the cluster
|
||||
>
|
||||
> This behaviour is now **consistent** whether you delete from the Applications List or from the Resource Tree view.
|
||||
|
||||
### Best Practices for App of Apps Pattern
|
||||
|
||||
When working with the App of Apps pattern:
|
||||
|
||||
1. **Understand the impact**: Deleting a child application with Foreground or Background propagation will delete all of its managed resources
|
||||
2. **Review before deleting**: Always verify what resources are managed by the application before performing a cascading delete
|
||||
3. **Use Non-Cascading cautiously**: If you only want to remove the Application resource but keep the deployed workloads, use Non-Cascading delete
|
||||
4. **Consider finalizers**: Ensure child applications have appropriate finalizers set based on your deletion strategy (see [Cascading Deletion](../operator-manual/cluster-bootstrapping.md#cascading-deletion))
|
||||
|
||||
### Example Scenarios
|
||||
|
||||
#### Scenario 1: Deleting a child application and all its resources
|
||||
|
||||
1. Navigate to the parent application's Resource Tree
|
||||
2. Click the kebab menu (button with the three vertical dots) on a child Application resource
|
||||
3. Select "Delete"
|
||||
4. Choose **Foreground** or **Background** propagation policy
|
||||
5. Confirm the deletion
|
||||
|
||||
**Result**: The child Application and all its managed resources (Deployments, Services, etc.) are deleted.
|
||||
|
||||
#### Scenario 2: Removing Argo CD management but keeping resources
|
||||
|
||||
1. Navigate to the Applications List or the parent application's Resource Tree
|
||||
2. Click the kebab menu (button with the three vertical dots) on a child Application resource
|
||||
3. Select "Delete"
|
||||
4. Choose **Non-Cascading** propagation policy
|
||||
5. Confirm the deletion
|
||||
|
||||
**Result**: Only the Application resource is deleted. All managed resources continue running in the cluster.
|
||||
|
||||
#### Scenario 3: Deleting from Resource Tree with context awareness
|
||||
|
||||
When you delete a child application from the Resource Tree view:
|
||||
|
||||
- Argo CD recognizes it as an Application resource (not just a generic Kubernetes resource)
|
||||
- Shows "Delete child application" dialog if it detects the `app.kubernetes.io/part-of` label
|
||||
- Displays an informational note explaining you're using the same behaviour as the Applications List
|
||||
- Provides the same three propagation policy options
|
||||
|
||||
This ensures predictable and consistent deletion behaviour regardless of where you initiate the deletion.
|
||||
|
||||
@@ -297,7 +297,6 @@ data:
|
||||
{{- if .metadata.author }}
|
||||
Co-authored-by: {{ .metadata.author }}
|
||||
{{- end }}
|
||||
```
|
||||
|
||||
### Credential Templates
|
||||
|
||||
|
||||
@@ -17,12 +17,11 @@ Adding the argocd.argoproj.io/hook annotation to a resource will assign it to a
|
||||
|
||||
## How phases work?
|
||||
|
||||
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following:
|
||||
|
||||
1. Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
|
||||
2. Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
|
||||
3. Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
|
||||
Argo CD will respect resources assigned to different phases, during a sync operation Argo CD will do the following.
|
||||
|
||||
Apply all the resources marked as PreSync hooks. If any of them fails the whole sync process will stop and will be marked as failed
|
||||
Apply all the resources marked as Sync hooks. If any of them fails the whole sync process will be marked as failed. Hooks marked with SyncFail will also run
|
||||
Apply all the resources marked as PostSync hooks. If any of them fails the whole sync process will be marked as failed.
|
||||
Hooks marked with Skip will not be applied.
|
||||
|
||||
Here is a graphical overview of the sync process:
|
||||
@@ -55,9 +54,8 @@ Argo CD also offers an alternative method of changing the sync order of resource
|
||||
Hooks and resources are assigned to wave 0 by default. The wave can be negative, so you can create a wave that runs before all other resources.
|
||||
|
||||
When a sync operation takes place, Argo CD will:
|
||||
|
||||
1. Order all resources according to their wave (lowest to highest)
|
||||
2. Apply the resources according to the resulting sequence
|
||||
Order all resources according to their wave (lowest to highest)
|
||||
Apply the resources according to the resulting sequence
|
||||
|
||||
There is currently a delay between each sync wave in order to give other controllers a chance to react to the spec change that was just applied. This also prevents Argo CD from assessing resource health too quickly (against the stale object), causing hooks to fire prematurely. The current delay between each sync wave is 2 seconds and can be configured via the environment variable ARGOCD_SYNC_WAVE_DELAY.
|
||||
|
||||
@@ -69,16 +67,16 @@ While you can use sync waves on their own, for maximum flexibility you can combi
|
||||
|
||||
When Argo CD starts a sync, it orders the resources in the following precedence:
|
||||
|
||||
1. The phase
|
||||
2. The wave they are in (lower values first)
|
||||
3. By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
|
||||
4. By name
|
||||
The phase
|
||||
The wave they are in (lower values first)
|
||||
By kind (e.g. namespaces first and then other Kubernetes resources, followed by custom resources)
|
||||
By name
|
||||
|
||||
Once the order is defined:
|
||||
|
||||
1. First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
|
||||
2. It applies resources in that wave.
|
||||
3. It repeats this process until all phases and waves are in-sync and healthy.
|
||||
First Argo CD determines the number of the first wave to apply. This is the first number where any resource is out-of-sync or unhealthy.
|
||||
It applies resources in that wave.
|
||||
It repeats this process until all phases and waves are in-sync and healthy.
|
||||
|
||||
Because an application can have resources that are unhealthy in the first wave, it may be that the app can never get to healthy.
|
||||
|
||||
|
||||
12
go.mod
12
go.mod
@@ -6,13 +6,13 @@ require (
|
||||
code.gitea.io/sdk/gitea v0.22.0
|
||||
dario.cat/mergo v1.0.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
|
||||
github.com/Azure/kubelogin v0.2.10
|
||||
github.com/Masterminds/semver/v3 v3.4.0
|
||||
github.com/Masterminds/sprig/v3 v3.3.0
|
||||
github.com/TomOnTime/utfutil v1.0.0
|
||||
github.com/alicebob/miniredis/v2 v2.35.0
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20250908182407-97ad5b59a627
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff
|
||||
github.com/argoproj/pkg v0.13.6
|
||||
github.com/argoproj/pkg/v2 v2.0.1
|
||||
@@ -47,7 +47,7 @@ require (
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0
|
||||
github.com/golang/protobuf v1.5.4
|
||||
github.com/google/btree v1.1.3
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/google/go-github/v69 v69.2.0
|
||||
github.com/google/go-jsonnet v0.21.0
|
||||
@@ -115,7 +115,7 @@ require (
|
||||
layeh.com/gopher-json v0.0.0-20190114024228-97fed8db8427
|
||||
oras.land/oras-go/v2 v2.6.0
|
||||
sigs.k8s.io/controller-runtime v0.21.0
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.1-0.20251003215857-446d8398e19c
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
@@ -134,7 +134,7 @@ require (
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
@@ -290,7 +290,7 @@ require (
|
||||
k8s.io/controller-manager v0.34.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kube-aggregator v0.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.2 // indirect
|
||||
k8s.io/kubernetes v1.34.0 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
|
||||
19
go.sum
19
go.sum
@@ -46,8 +46,8 @@ github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs=
|
||||
github.com/42wim/httpsig v1.2.3/go.mod h1:nZq9OlYKDrUBhptd77IHx4/sZZD+IxTBADvAPI9G/EM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
|
||||
@@ -74,8 +74,8 @@ github.com/Azure/kubelogin v0.2.10 h1:6CBXJt/RtnTPI1R1E4cfEdL+BnCKMuywtglX//FZPD
|
||||
github.com/Azure/kubelogin v0.2.10/go.mod h1:JtR+7h3NHAwQPZ+CagUZ+F1Uk3/JU0eRFwpESSnRNGU=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=
|
||||
@@ -113,8 +113,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d h1:iUJYrbSvpV9n8vyl1sBt1GceM60HhHfnHxuzcm5apDg=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20251217140045-5baed5604d2d/go.mod h1:PauXVUVcfiTgC+34lDdWzPS101g4NpsUtDAjFBnWf94=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20250908182407-97ad5b59a627 h1:yntvA+uaFz62HRfWGGwlvs4ErdxoLQjCpDXufdEt2FI=
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20250908182407-97ad5b59a627/go.mod h1:yJ3t/GRn9Gx2LEyMrh9X0roL7zzVlk3nvuJt6G1o6jI=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff h1:pGGAeHIktPuYCRl1Z540XdxPFnedqyUhJK4VgpyJZfY=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20250908182349-da04400446ff/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
|
||||
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
|
||||
@@ -1434,8 +1434,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs=
|
||||
k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4=
|
||||
k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
|
||||
k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
|
||||
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
|
||||
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
@@ -1461,9 +1461,8 @@ sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HR
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.2.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.1-0.20251003215857-446d8398e19c h1:RCkxmWwPjOw2O1RiDgBgI6tfISvB07jAh+GEztp7TWk=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.1-0.20251003215857-446d8398e19c/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
|
||||
@@ -10,7 +10,7 @@ PATH="${INSTALL_PATH}:${PATH}"
|
||||
[ -d "$INSTALL_PATH" ] || mkdir -p "$INSTALL_PATH"
|
||||
|
||||
# renovate: datasource=github-releases depName=gotestyourself/gotestsum packageName=gotestyourself/gotestsum
|
||||
GOTESTSUM_VERSION=1.12.3
|
||||
GOTESTSUM_VERSION=1.13.0
|
||||
|
||||
OS=$(go env GOOS)
|
||||
ARCH=$(go env GOARCH)
|
||||
|
||||
@@ -187,12 +187,6 @@ spec:
|
||||
name: argocd-cmd-params-cm
|
||||
key: applicationsetcontroller.requeue.after
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
optional: true
|
||||
volumeMounts:
|
||||
- mountPath: /app/config/ssh
|
||||
name: ssh-known-hosts
|
||||
@@ -206,8 +200,6 @@ spec:
|
||||
name: tmp
|
||||
- name: argocd-repo-server-tls
|
||||
mountPath: /app/config/reposerver/tls
|
||||
- name: argocd-cmd-params-cm
|
||||
mountPath: /home/argocd/params
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
@@ -243,12 +235,5 @@ spec:
|
||||
path: tls.key
|
||||
- key: ca.crt
|
||||
path: ca.crt
|
||||
- name: argocd-cmd-params-cm
|
||||
configMap:
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.2
|
||||
newTag: latest
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.2
|
||||
newTag: latest
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
@@ -40,7 +40,7 @@ spec:
|
||||
serviceAccountName: argocd-redis
|
||||
containers:
|
||||
- name: redis
|
||||
image: redis:8.2.2-alpine
|
||||
image: redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "--save"
|
||||
|
||||
@@ -239,12 +239,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: argocd-cmd-params-cm
|
||||
key: reposerver.enable.builtin.git.config
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
|
||||
53
manifests/core-install-with-hydrator.yaml
generated
53
manifests/core-install-with-hydrator.yaml
generated
@@ -1485,9 +1485,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4901,9 +4900,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4984,9 +4982,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -23740,9 +23737,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
@@ -24844,13 +24838,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -24880,8 +24868,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -24910,13 +24896,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -24985,7 +24964,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25097,7 +25076,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -25113,7 +25092,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25386,12 +25365,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -25410,7 +25383,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25462,7 +25435,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -25810,7 +25783,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
51
manifests/core-install.yaml
generated
51
manifests/core-install.yaml
generated
@@ -1485,9 +1485,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4901,9 +4900,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4984,9 +4982,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -23740,9 +23737,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
@@ -24812,13 +24806,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -24848,8 +24836,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -24878,13 +24864,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -24931,7 +24910,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -24947,7 +24926,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25220,12 +25199,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -25244,7 +25217,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25296,7 +25269,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -25644,7 +25617,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.2
|
||||
newTag: latest
|
||||
|
||||
15
manifests/crds/application-crd.yaml
generated
15
manifests/crds/application-crd.yaml
generated
@@ -1484,9 +1484,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4900,9 +4899,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4983,9 +4981,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
|
||||
3
manifests/crds/applicationset-crd.yaml
generated
3
manifests/crds/applicationset-crd.yaml
generated
@@ -17823,9 +17823,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: v3.2.2
|
||||
newTag: latest
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
@@ -1270,7 +1270,7 @@ spec:
|
||||
automountServiceAccountToken: false
|
||||
initContainers:
|
||||
- name: config-init
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
{}
|
||||
@@ -1310,7 +1310,7 @@ spec:
|
||||
|
||||
containers:
|
||||
- name: redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- redis-server
|
||||
@@ -1384,7 +1384,7 @@ spec:
|
||||
- /bin/sh
|
||||
- /readonly-config/trigger-failover-if-master.sh
|
||||
- name: sentinel
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- redis-sentinel
|
||||
@@ -1457,7 +1457,7 @@ spec:
|
||||
- sleep 30; redis-cli -p 26379 sentinel reset argocd
|
||||
|
||||
- name: split-brain-fix
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- sh
|
||||
|
||||
@@ -27,7 +27,7 @@ redis-ha:
|
||||
serviceAccount:
|
||||
automountToken: true
|
||||
image:
|
||||
tag: 8.2.2-alpine
|
||||
tag: 8.2.1-alpine
|
||||
sentinel:
|
||||
bind: '0.0.0.0'
|
||||
lifecycle:
|
||||
|
||||
65
manifests/ha/install-with-hydrator.yaml
generated
65
manifests/ha/install-with-hydrator.yaml
generated
@@ -1485,9 +1485,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4901,9 +4900,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4984,9 +4982,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -23740,9 +23737,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
@@ -26210,13 +26204,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -26246,8 +26234,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -26276,13 +26262,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -26351,7 +26330,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26502,7 +26481,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -26598,7 +26577,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -26722,7 +26701,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -27021,12 +27000,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -27045,7 +27018,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -27097,7 +27070,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -27471,7 +27444,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -27855,7 +27828,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
@@ -27953,7 +27926,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
@@ -28024,7 +27997,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
postStart:
|
||||
@@ -28099,7 +28072,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: split-brain-fix
|
||||
resources: {}
|
||||
@@ -28134,7 +28107,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
securityContext:
|
||||
|
||||
63
manifests/ha/install.yaml
generated
63
manifests/ha/install.yaml
generated
@@ -1485,9 +1485,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4901,9 +4900,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4984,9 +4982,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -23740,9 +23737,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
@@ -26180,13 +26174,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -26216,8 +26204,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -26246,13 +26232,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -26338,7 +26317,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -26434,7 +26413,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -26558,7 +26537,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -26857,12 +26836,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -26881,7 +26854,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26933,7 +26906,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -27307,7 +27280,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -27691,7 +27664,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
@@ -27789,7 +27762,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
@@ -27860,7 +27833,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
postStart:
|
||||
@@ -27935,7 +27908,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: split-brain-fix
|
||||
resources: {}
|
||||
@@ -27970,7 +27943,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
securityContext:
|
||||
|
||||
47
manifests/ha/namespace-install-with-hydrator.yaml
generated
47
manifests/ha/namespace-install-with-hydrator.yaml
generated
@@ -1891,13 +1891,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1927,8 +1921,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -1957,13 +1949,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -2032,7 +2017,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2183,7 +2168,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2279,7 +2264,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2403,7 +2388,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2702,12 +2687,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -2726,7 +2705,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2778,7 +2757,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -3152,7 +3131,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3536,7 +3515,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
@@ -3634,7 +3613,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
@@ -3705,7 +3684,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
postStart:
|
||||
@@ -3780,7 +3759,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: split-brain-fix
|
||||
resources: {}
|
||||
@@ -3815,7 +3794,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
securityContext:
|
||||
|
||||
45
manifests/ha/namespace-install.yaml
generated
45
manifests/ha/namespace-install.yaml
generated
@@ -1861,13 +1861,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1897,8 +1891,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -1927,13 +1919,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -2019,7 +2004,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2115,7 +2100,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2239,7 +2224,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2538,12 +2523,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -2562,7 +2541,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2614,7 +2593,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2988,7 +2967,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3372,7 +3351,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
@@ -3470,7 +3449,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
preStop:
|
||||
@@ -3541,7 +3520,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
postStart:
|
||||
@@ -3616,7 +3595,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: split-brain-fix
|
||||
resources: {}
|
||||
@@ -3651,7 +3630,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: config-init
|
||||
securityContext:
|
||||
|
||||
59
manifests/install-with-hydrator.yaml
generated
59
manifests/install-with-hydrator.yaml
generated
@@ -1485,9 +1485,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4901,9 +4900,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4984,9 +4982,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -23740,9 +23737,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
@@ -25288,13 +25282,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -25324,8 +25312,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -25354,13 +25340,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -25429,7 +25408,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25580,7 +25559,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -25676,7 +25655,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -25762,7 +25741,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -25778,7 +25757,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -26051,12 +26030,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -26075,7 +26048,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -26127,7 +26100,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -26499,7 +26472,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -26883,7 +26856,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
57
manifests/install.yaml
generated
57
manifests/install.yaml
generated
@@ -1485,9 +1485,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4901,9 +4900,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -4984,9 +4982,8 @@ spec:
|
||||
pattern: ^.{2,}|[^./]$
|
||||
type: string
|
||||
targetBranch:
|
||||
description: |-
|
||||
TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
description: TargetBranch is the branch to which hydrated
|
||||
manifests should be committed
|
||||
type: string
|
||||
required:
|
||||
- path
|
||||
@@ -23740,9 +23737,6 @@ spec:
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
resourcesCount:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
@@ -25256,13 +25250,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -25292,8 +25280,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -25322,13 +25308,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -25414,7 +25393,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -25510,7 +25489,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -25596,7 +25575,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -25612,7 +25591,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -25885,12 +25864,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -25909,7 +25882,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -25961,7 +25934,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -26333,7 +26306,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -26717,7 +26690,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
41
manifests/namespace-install-with-hydrator.yaml
generated
41
manifests/namespace-install-with-hydrator.yaml
generated
@@ -969,13 +969,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1005,8 +999,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -1035,13 +1027,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -1110,7 +1095,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1261,7 +1246,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1357,7 +1342,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1443,7 +1428,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -1459,7 +1444,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1732,12 +1717,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -1756,7 +1735,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1808,7 +1787,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2180,7 +2159,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2564,7 +2543,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
39
manifests/namespace-install.yaml
generated
39
manifests/namespace-install.yaml
generated
@@ -937,13 +937,7 @@ spec:
|
||||
key: applicationsetcontroller.requeue.after
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -973,8 +967,6 @@ spec:
|
||||
name: tmp
|
||||
- mountPath: /app/config/reposerver/tls
|
||||
name: argocd-repo-server-tls
|
||||
- mountPath: /home/argocd/params
|
||||
name: argocd-cmd-params-cm
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
serviceAccountName: argocd-applicationset-controller
|
||||
@@ -1003,13 +995,6 @@ spec:
|
||||
path: ca.crt
|
||||
optional: true
|
||||
secretName: argocd-repo-server-tls
|
||||
- configMap:
|
||||
items:
|
||||
- key: applicationsetcontroller.profile.enabled
|
||||
path: profiler.enabled
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
name: argocd-cmd-params-cm
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -1095,7 +1080,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1191,7 +1176,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1277,7 +1262,7 @@ spec:
|
||||
secretKeyRef:
|
||||
key: auth
|
||||
name: argocd-redis
|
||||
image: public.ecr.aws/docker/library/redis:8.2.2-alpine
|
||||
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
|
||||
imagePullPolicy: Always
|
||||
name: redis
|
||||
ports:
|
||||
@@ -1293,7 +1278,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1566,12 +1551,6 @@ spec:
|
||||
key: reposerver.git.request.timeout
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: reposerver.enable.builtin.git.config
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
- name: ARGOCD_GRPC_MAX_SIZE_MB
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@@ -1590,7 +1569,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1642,7 +1621,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /var/run/argocd/argocd-cmp-server
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2014,7 +1993,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2398,7 +2377,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:v3.2.2
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -63,7 +63,6 @@ nav:
|
||||
- operator-manual/web_based_terminal.md
|
||||
- operator-manual/config-management-plugins.md
|
||||
- operator-manual/deep_links.md
|
||||
- operator-manual/git_configuration.md
|
||||
- Notifications:
|
||||
- Overview: operator-manual/notifications/index.md
|
||||
- operator-manual/notifications/triggers.md
|
||||
|
||||
@@ -805,9 +805,6 @@ type ApplicationSetStatus struct {
|
||||
ApplicationStatus []ApplicationSetApplicationStatus `json:"applicationStatus,omitempty" protobuf:"bytes,2,name=applicationStatus"`
|
||||
// Resources is a list of Applications resources managed by this application set.
|
||||
Resources []ResourceStatus `json:"resources,omitempty" protobuf:"bytes,3,opt,name=resources"`
|
||||
// ResourcesCount is the total number of resources managed by this application set. The count may be higher than actual number of items in the Resources field when
|
||||
// the number of managed resources exceeds the limit imposed by the controller (to avoid making the status field too large).
|
||||
ResourcesCount int64 `json:"resourcesCount,omitempty" protobuf:"varint,4,opt,name=resourcesCount"`
|
||||
}
|
||||
|
||||
// ApplicationSetCondition contains details about an applicationset condition, which is usually an error or warning
|
||||
@@ -870,6 +867,20 @@ const (
|
||||
ApplicationSetReasonSyncApplicationError = "SyncApplicationError"
|
||||
)
|
||||
|
||||
// Represents resource health status
|
||||
type ProgressiveSyncStatusCode string
|
||||
|
||||
const (
|
||||
// Indicates that an Application sync is waiting to be trigerred
|
||||
ProgressiveSyncWaiting ProgressiveSyncStatusCode = "Waiting"
|
||||
// Indicates that a sync has been trigerred, but the application did not report any status
|
||||
ProgressiveSyncPending ProgressiveSyncStatusCode = "Pending"
|
||||
// Indicates that the application has not yet reached an Healthy state in regards to the requested sync
|
||||
ProgressiveSyncProgressing ProgressiveSyncStatusCode = "Progressing"
|
||||
// Indicates that the application has reached an Healthy state in regards to the requested sync
|
||||
ProgressiveSyncHealthy ProgressiveSyncStatusCode = "Healthy"
|
||||
)
|
||||
|
||||
// ApplicationSetApplicationStatus contains details about each Application managed by the ApplicationSet
|
||||
type ApplicationSetApplicationStatus struct {
|
||||
// Application contains the name of the Application resource
|
||||
@@ -878,8 +889,8 @@ type ApplicationSetApplicationStatus struct {
|
||||
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,2,opt,name=lastTransitionTime"`
|
||||
// Message contains human-readable message indicating details about the status
|
||||
Message string `json:"message" protobuf:"bytes,3,opt,name=message"`
|
||||
// Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)
|
||||
Status string `json:"status" protobuf:"bytes,4,opt,name=status"`
|
||||
// Status contains the AppSet's perceived status of the managed Application resource
|
||||
Status ProgressiveSyncStatusCode `json:"status" protobuf:"bytes,4,opt,name=status"`
|
||||
// Step tracks which step this Application should be updated in
|
||||
Step string `json:"step" protobuf:"bytes,5,opt,name=step"`
|
||||
// TargetRevision tracks the desired revisions the Application should be synced to.
|
||||
|
||||
1575
pkg/apis/application/v1alpha1/generated.pb.go
generated
1575
pkg/apis/application/v1alpha1/generated.pb.go
generated
File diff suppressed because it is too large
Load Diff
@@ -222,7 +222,7 @@ message ApplicationSetApplicationStatus {
|
||||
// Message contains human-readable message indicating details about the status
|
||||
optional string message = 3;
|
||||
|
||||
// Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)
|
||||
// Status contains the AppSet's perceived status of the managed Application resource
|
||||
optional string status = 4;
|
||||
|
||||
// Step tracks which step this Application should be updated in
|
||||
@@ -369,10 +369,6 @@ message ApplicationSetStatus {
|
||||
|
||||
// Resources is a list of Applications resources managed by this application set.
|
||||
repeated ResourceStatus resources = 3;
|
||||
|
||||
// ResourcesCount is the total number of resources managed by this application set. The count may be higher than actual number of items in the Resources field when
|
||||
// the number of managed resources exceeds the limit imposed by the controller (to avoid making the status field too large).
|
||||
optional int64 resourcesCount = 4;
|
||||
}
|
||||
|
||||
// ApplicationSetStrategy configures how generated Applications are updated in sequence.
|
||||
@@ -2639,8 +2635,7 @@ message SyncPolicyAutomated {
|
||||
// SyncSource specifies a location from which hydrated manifests may be synced. RepoURL is assumed based on the
|
||||
// associated DrySource config in the SourceHydrator.
|
||||
message SyncSource {
|
||||
// TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
// If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
// TargetBranch is the branch to which hydrated manifests should be committed
|
||||
optional string targetBranch = 1;
|
||||
|
||||
// Path is a directory path within the git repository where hydrated manifests should be committed to and synced
|
||||
|
||||
@@ -443,8 +443,7 @@ type DrySource struct {
|
||||
// SyncSource specifies a location from which hydrated manifests may be synced. RepoURL is assumed based on the
|
||||
// associated DrySource config in the SourceHydrator.
|
||||
type SyncSource struct {
|
||||
// TargetBranch is the branch from which hydrated manifests will be synced.
|
||||
// If HydrateTo is not set, this is also the branch to which hydrated manifests are committed.
|
||||
// TargetBranch is the branch to which hydrated manifests should be committed
|
||||
TargetBranch string `json:"targetBranch" protobuf:"bytes,1,name=targetBranch"`
|
||||
// Path is a directory path within the git repository where hydrated manifests should be committed to and synced
|
||||
// from. The Path should never point to the root of the repo. If hydrateTo is set, this is just the path from which
|
||||
|
||||
3
renovate.json
Normal file
3
renovate.json
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
|
||||
}
|
||||
@@ -117,7 +117,6 @@ type RepoServerInitConstants struct {
|
||||
DisableHelmManifestMaxExtractedSize bool
|
||||
IncludeHiddenDirectories bool
|
||||
CMPUseManifestGeneratePaths bool
|
||||
EnableBuiltinGitConfig bool
|
||||
}
|
||||
|
||||
var manifestGenerateLock = sync.NewKeyLock()
|
||||
@@ -2567,9 +2566,7 @@ func (s *Service) newClient(repo *v1alpha1.Repository, opts ...git.ClientOpts) (
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts,
|
||||
git.WithEventHandlers(metrics.NewGitClientEventHandlers(s.metricsServer)),
|
||||
git.WithBuiltinGitConfig(s.initConstants.EnableBuiltinGitConfig))
|
||||
opts = append(opts, git.WithEventHandlers(metrics.NewGitClientEventHandlers(s.metricsServer)))
|
||||
return s.newGitClient(repo.Repo, repoPath, repo.GetGitCreds(s.gitCredsStore), repo.IsInsecure(), repo.EnableLFS, repo.Proxy, repo.NoProxy, opts...)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
-- Health check copied from here: https://github.com/crossplane/docs/blob/709889c5dbe6e5a2ea3dffd66fe276cf465b47b5/content/master/guides/crossplane-with-argo-cd.md
|
||||
-- Health check copied from here: https://github.com/crossplane/docs/blob/bd701357e9d5eecf529a0b42f23a78850a6d1d87/content/master/guides/crossplane-with-argo-cd.md
|
||||
|
||||
health_status = {
|
||||
status = "Progressing",
|
||||
@@ -18,10 +18,9 @@ local has_no_status = {
|
||||
"Composition",
|
||||
"CompositionRevision",
|
||||
"DeploymentRuntimeConfig",
|
||||
"ClusterProviderConfig",
|
||||
"ControllerConfig",
|
||||
"ProviderConfig",
|
||||
"ProviderConfigUsage",
|
||||
"ControllerConfig" -- Added to ensure that healthcheck is backwards-compatible with Crossplane v1
|
||||
"ProviderConfigUsage"
|
||||
}
|
||||
if obj.status == nil or next(obj.status) == nil and contains(has_no_status, obj.kind) then
|
||||
health_status.status = "Healthy"
|
||||
@@ -30,7 +29,7 @@ if obj.status == nil or next(obj.status) == nil and contains(has_no_status, obj.
|
||||
end
|
||||
|
||||
if obj.status == nil or next(obj.status) == nil or obj.status.conditions == nil then
|
||||
if (obj.kind == "ProviderConfig" or obj.kind == "ClusterProviderConfig") and obj.status.users ~= nil then
|
||||
if obj.kind == "ProviderConfig" and obj.status.users ~= nil then
|
||||
health_status.status = "Healthy"
|
||||
health_status.message = "Resource is in use."
|
||||
return health_status
|
||||
@@ -55,7 +54,7 @@ for i, condition in ipairs(obj.status.conditions) do
|
||||
end
|
||||
end
|
||||
|
||||
if contains({"Ready", "Healthy", "Offered", "Established", "ValidPipeline", "RevisionHealthy"}, condition.type) then
|
||||
if contains({"Ready", "Healthy", "Offered", "Established"}, condition.type) then
|
||||
if condition.status == "True" then
|
||||
health_status.status = "Healthy"
|
||||
health_status.message = "Resource is up-to-date."
|
||||
|
||||
@@ -3,7 +3,3 @@ tests:
|
||||
status: Healthy
|
||||
message: "Resource is up-to-date."
|
||||
inputPath: testdata/composition_healthy.yaml
|
||||
- healthStatus:
|
||||
status: Healthy
|
||||
message: "Resource is up-to-date."
|
||||
inputPath: testdata/configurationrevision_healthy.yaml
|
||||
@@ -1,22 +0,0 @@
|
||||
apiVersion: pkg.crossplane.io/v1
|
||||
kind: ConfigurationRevision
|
||||
metadata:
|
||||
annotations:
|
||||
meta.crossplane.io/license: Apache-2.0
|
||||
meta.crossplane.io/maintainer: Upbound <support@upbound.io>
|
||||
meta.crossplane.io/source: github.com/upbound/configuration-getting-started
|
||||
name: upbound-configuration-getting-started-869bca254eb1
|
||||
spec:
|
||||
desiredState: Active
|
||||
ignoreCrossplaneConstraints: false
|
||||
image: xpkg.upbound.io/upbound/configuration-getting-started:v0.3.0
|
||||
packagePullPolicy: IfNotPresent
|
||||
revision: 1
|
||||
skipDependencyResolution: false
|
||||
status:
|
||||
conditions:
|
||||
- lastTransitionTime: "2025-09-29T18:06:40Z"
|
||||
observedGeneration: 1
|
||||
reason: HealthyPackageRevision
|
||||
status: "True"
|
||||
type: RevisionHealthy
|
||||
@@ -1,4 +1,4 @@
|
||||
-- Health check copied from here: https://github.com/crossplane/docs/blob/709889c5dbe6e5a2ea3dffd66fe276cf465b47b5/content/master/guides/crossplane-with-argo-cd.md
|
||||
-- Health check copied from here: https://github.com/crossplane/docs/blob/bd701357e9d5eecf529a0b42f23a78850a6d1d87/content/master/guides/crossplane-with-argo-cd.md
|
||||
|
||||
health_status = {
|
||||
status = "Progressing",
|
||||
@@ -15,7 +15,6 @@ local function contains (table, val)
|
||||
end
|
||||
|
||||
local has_no_status = {
|
||||
"ClusterProviderConfig",
|
||||
"ProviderConfig",
|
||||
"ProviderConfigUsage"
|
||||
}
|
||||
@@ -27,7 +26,7 @@ if obj.status == nil or next(obj.status) == nil and contains(has_no_status, obj.
|
||||
end
|
||||
|
||||
if obj.status == nil or next(obj.status) == nil or obj.status.conditions == nil then
|
||||
if (obj.kind == "ProviderConfig" or obj.kind == "ClusterProviderConfig") and obj.status.users ~= nil then
|
||||
if obj.kind == "ProviderConfig" and obj.status.users ~= nil then
|
||||
health_status.status = "Healthy"
|
||||
health_status.message = "Resource is in use."
|
||||
return health_status
|
||||
|
||||
@@ -7,6 +7,3 @@ discoveryTests:
|
||||
- inputPath: testdata/external-secret.yaml
|
||||
result:
|
||||
- name: "refresh"
|
||||
- inputPath: testdata/external-secret-refresh-policy.yaml
|
||||
result:
|
||||
- name: "refresh"
|
||||
|
||||
@@ -3,11 +3,10 @@ local actions = {}
|
||||
local disable_refresh = false
|
||||
local time_units = {"ns", "us", "µs", "ms", "s", "m", "h"}
|
||||
local digits = obj.spec.refreshInterval
|
||||
local policy = obj.spec.refreshPolicy
|
||||
if digits ~= nil then
|
||||
digits = tostring(digits)
|
||||
for _, time_unit in ipairs(time_units) do
|
||||
if (digits == "0" or digits == "0" .. time_unit) and policy ~= "OnChange" then
|
||||
if digits == "0" or digits == "0" .. time_unit then
|
||||
disable_refresh = true
|
||||
break
|
||||
end
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
apiVersion: external-secrets.io/v1alpha1
|
||||
kind: ExternalSecret
|
||||
metadata:
|
||||
creationTimestamp: '2021-11-16T21:59:33Z'
|
||||
generation: 1
|
||||
name: test-healthy
|
||||
namespace: argocd
|
||||
resourceVersion: '136487331'
|
||||
selfLink: /apis/external-secrets.io/v1alpha1/namespaces/argocd/externalsecrets/test-healthy
|
||||
uid: 1e754a7e-0781-4d57-932d-4651d5b19586
|
||||
spec:
|
||||
data:
|
||||
- remoteRef:
|
||||
key: secret/sa/example
|
||||
property: api.address
|
||||
secretKey: url
|
||||
- remoteRef:
|
||||
key: secret/sa/example
|
||||
property: ca.crt
|
||||
secretKey: ca
|
||||
- remoteRef:
|
||||
key: secret/sa/example
|
||||
property: token
|
||||
secretKey: token
|
||||
refreshInterval: 0
|
||||
refreshPolicy: OnChange
|
||||
secretStoreRef:
|
||||
kind: SecretStore
|
||||
name: example
|
||||
target:
|
||||
creationPolicy: Owner
|
||||
template:
|
||||
data:
|
||||
config: |
|
||||
{
|
||||
"bearerToken": "{{ .token | base64decode | toString }}",
|
||||
"tlsClientConfig": {
|
||||
"insecure": false,
|
||||
"caData": "{{ .ca | toString }}"
|
||||
}
|
||||
}
|
||||
name: cluster-test
|
||||
server: '{{ .url | toString }}'
|
||||
metadata:
|
||||
labels:
|
||||
argocd.argoproj.io/secret-type: cluster
|
||||
status:
|
||||
conditions:
|
||||
- lastTransitionTime: '2021-11-16T21:59:34Z'
|
||||
message: Secret was synced
|
||||
reason: SecretSynced
|
||||
status: 'True'
|
||||
type: Ready
|
||||
refreshTime: '2021-11-29T18:32:24Z'
|
||||
syncedResourceVersion: 1-519a61da0dc68b2575b4f8efada70e42
|
||||
@@ -9,39 +9,10 @@ function checkConditions(conditions, conditionType)
|
||||
return true
|
||||
end
|
||||
|
||||
-- isParentGenerationObserved checks if a parent's conditions match the current resource generation
|
||||
-- For HTTPRoute, observedGeneration is stored in each condition within a parent
|
||||
function isParentGenerationObserved(obj, parent)
|
||||
if obj.metadata.generation == nil then
|
||||
-- If no generation is set, accept all conditions
|
||||
return true
|
||||
end
|
||||
|
||||
if parent.conditions == nil or #parent.conditions == 0 then
|
||||
return false
|
||||
end
|
||||
|
||||
-- Check if all conditions have observedGeneration matching current generation
|
||||
for _, condition in ipairs(parent.conditions) do
|
||||
if condition.observedGeneration ~= nil then
|
||||
if condition.observedGeneration ~= obj.metadata.generation then
|
||||
return false
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
if obj.status ~= nil then
|
||||
if obj.status.parents ~= nil then
|
||||
for _, parent in ipairs(obj.status.parents) do
|
||||
if parent.conditions ~= nil then
|
||||
-- Skip this parent if it's not from the current generation
|
||||
if not isParentGenerationObserved(obj, parent) then
|
||||
goto continue
|
||||
end
|
||||
|
||||
local resolvedRefsFalse, resolvedRefsMsg = checkConditions(parent.conditions, "ResolvedRefs")
|
||||
local acceptedFalse, acceptedMsg = checkConditions(parent.conditions, "Accepted")
|
||||
|
||||
@@ -73,20 +44,15 @@ if obj.status ~= nil then
|
||||
hs.message = "Parent " .. (parent.parentRef.name or "") .. ": " .. progressingMsg
|
||||
return hs
|
||||
end
|
||||
|
||||
::continue::
|
||||
end
|
||||
end
|
||||
|
||||
if #obj.status.parents > 0 then
|
||||
for _, parent in ipairs(obj.status.parents) do
|
||||
if parent.conditions ~= nil and #parent.conditions > 0 then
|
||||
-- Only mark as healthy if we found a parent from the current generation
|
||||
if isParentGenerationObserved(obj, parent) then
|
||||
hs.status = "Healthy"
|
||||
hs.message = "HTTPRoute is healthy"
|
||||
return hs
|
||||
end
|
||||
hs.status = "Healthy"
|
||||
hs.message = "HTTPRoute is healthy"
|
||||
return hs
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@@ -14,8 +14,4 @@ tests:
|
||||
- healthStatus:
|
||||
status: Progressing
|
||||
message: "Parent example-gateway: Route is still being programmed"
|
||||
inputPath: testdata/progressing.yaml
|
||||
- healthStatus:
|
||||
status: Healthy
|
||||
message: HTTPRoute is healthy
|
||||
inputPath: testdata/healthy_multiple_generations.yaml
|
||||
inputPath: testdata/progressing.yaml
|
||||
@@ -1,59 +0,0 @@
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: HTTPRoute
|
||||
metadata:
|
||||
name: example-httproute
|
||||
generation: 2
|
||||
spec:
|
||||
parentRefs:
|
||||
- kind: Gateway
|
||||
name: eg
|
||||
namespace: envoy-gateway-system
|
||||
sectionName: foo-nonexistent
|
||||
hostnames:
|
||||
- "example-httproute.example.com"
|
||||
rules:
|
||||
- backendRefs:
|
||||
- name: example-service
|
||||
port: 8080
|
||||
status:
|
||||
parents:
|
||||
- conditions:
|
||||
- lastTransitionTime: "2025-10-14T11:19:41Z"
|
||||
message: No listeners match this parent ref
|
||||
observedGeneration: 1
|
||||
reason: NoMatchingParent
|
||||
status: "False"
|
||||
type: Accepted
|
||||
- lastTransitionTime: "2025-10-14T11:19:41Z"
|
||||
message: Resolved all the Object references for the Route
|
||||
observedGeneration: 1
|
||||
reason: ResolvedRefs
|
||||
status: "True"
|
||||
type: ResolvedRefs
|
||||
controllerName: gateway.envoyproxy.io/gatewayclass-controller
|
||||
parentRef:
|
||||
group: gateway.networking.k8s.io
|
||||
kind: Gateway
|
||||
name: eg
|
||||
namespace: envoy-gateway-system
|
||||
sectionName: foo-nonexistent
|
||||
- conditions:
|
||||
- lastTransitionTime: "2025-10-14T11:25:18Z"
|
||||
message: Route is accepted
|
||||
observedGeneration: 2
|
||||
reason: Accepted
|
||||
status: "True"
|
||||
type: Accepted
|
||||
- lastTransitionTime: "2025-10-14T11:25:18Z"
|
||||
message: Resolved all the Object references for the Route
|
||||
observedGeneration: 2
|
||||
reason: ResolvedRefs
|
||||
status: "True"
|
||||
type: ResolvedRefs
|
||||
controllerName: gateway.envoyproxy.io/gatewayclass-controller
|
||||
parentRef:
|
||||
group: gateway.networking.k8s.io
|
||||
kind: Gateway
|
||||
name: eg
|
||||
namespace: envoy-gateway-system
|
||||
sectionName: https-net
|
||||
@@ -25,17 +25,9 @@ if obj.status.conditions then
|
||||
hs.message = "Waiting for Argo CD commit status spec update to be observed"
|
||||
return hs
|
||||
end
|
||||
-- Check for any False condition status
|
||||
if condition.status == "False" then
|
||||
if condition.status == "False" and condition.reason == "ReconciliationError" then
|
||||
hs.status = "Degraded"
|
||||
local msg = condition.message or "Unknown error"
|
||||
local reason = condition.reason or "Unknown"
|
||||
-- Don't include ReconciliationError in the message since it's redundant
|
||||
if reason == "ReconciliationError" then
|
||||
hs.message = "Argo CD commit status reconciliation failed: " .. msg
|
||||
else
|
||||
hs.message = "Argo CD commit status reconciliation failed (" .. reason .. "): " .. msg
|
||||
end
|
||||
hs.message = "Argo CD commit status reconciliation failed: " .. (condition.message or "Unknown error")
|
||||
return hs
|
||||
end
|
||||
end
|
||||
|
||||
@@ -15,10 +15,6 @@ tests:
|
||||
status: Degraded
|
||||
message: "Argo CD commit status reconciliation failed: Something went wrong"
|
||||
inputPath: testdata/reconcile-error.yaml
|
||||
- healthStatus:
|
||||
status: Degraded
|
||||
message: "Argo CD commit status reconciliation failed: Failed to query Argo CD applications: connection timeout"
|
||||
inputPath: testdata/non-reconciliation-error.yaml
|
||||
- healthStatus:
|
||||
status: Progressing
|
||||
message: Argo CD commit status is not ready yet
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
apiVersion: promoter.argoproj.io/v1alpha1
|
||||
kind: ArgoCDCommitStatus
|
||||
metadata:
|
||||
name: test-commit-status
|
||||
namespace: test
|
||||
generation: 1
|
||||
spec:
|
||||
applicationSelector:
|
||||
matchLabels:
|
||||
environment: production
|
||||
promotionStrategyRef:
|
||||
name: test
|
||||
status:
|
||||
conditions:
|
||||
- lastTransitionTime: '2025-10-15T16:00:00Z'
|
||||
message: 'Failed to query Argo CD applications: connection timeout'
|
||||
observedGeneration: 1
|
||||
reason: ReconciliationError
|
||||
status: 'False'
|
||||
type: Ready
|
||||
|
||||
@@ -26,17 +26,9 @@ if obj.status.conditions then
|
||||
hs.message = "Waiting for change transfer policy spec update to be observed"
|
||||
return hs
|
||||
end
|
||||
-- Check for any False condition status
|
||||
if condition.status == "False" then
|
||||
if condition.status == "False" and condition.reason == "ReconciliationError" then
|
||||
hs.status = "Degraded"
|
||||
local msg = condition.message or "Unknown error"
|
||||
local reason = condition.reason or "Unknown"
|
||||
-- Don't include ReconciliationError in the message since it's redundant
|
||||
if reason == "ReconciliationError" then
|
||||
hs.message = "Change transfer policy reconciliation failed: " .. msg
|
||||
else
|
||||
hs.message = "Change transfer policy reconciliation failed (" .. reason .. "): " .. msg
|
||||
end
|
||||
hs.message = "Change transfer policy reconciliation failed: " .. (condition.message or "Unknown error")
|
||||
return hs
|
||||
end
|
||||
end
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user