Compare commits

...

13 Commits

Author SHA1 Message Date
argoproj-renovate[bot]
f04ca4a967 chore(deps): update group node
Signed-off-by: renovate[bot] <renovate[bot]@users.noreply.github.com>
2025-09-17 18:05:40 +00:00
dependabot[bot]
9ef837c326 chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.11.0 to 1.12.0 (#24593)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-17 13:44:21 -04:00
argoproj-renovate[bot]
c11d35a20f chore(deps): update dependency gotestyourself/gotestsum to v1.13.0 (#24610)
Signed-off-by: renovate[bot] <renovate[bot]@users.noreply.github.com>
Co-authored-by: argoproj-renovate[bot] <161757507+argoproj-renovate[bot]@users.noreply.github.com>
2025-09-17 13:43:28 -04:00
renovate[bot]
a7a07e2cd8 chore(deps): update dependency normalize-url to v4.5.1 [security] (#24607)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-17 13:40:30 -04:00
argoproj-renovate[bot]
9faa6098ed chore(deps): update dependency markdown to v3.9 (#24611)
Signed-off-by: renovate[bot] <renovate[bot]@users.noreply.github.com>
Co-authored-by: argoproj-renovate[bot] <161757507+argoproj-renovate[bot]@users.noreply.github.com>
2025-09-17 13:21:32 -04:00
argoproj-renovate[bot]
0fb6c51f9d chore(deps): update group golang to v1.25.1 (#24605)
Signed-off-by: renovate[bot] <renovate[bot]@users.noreply.github.com>
Co-authored-by: argoproj-renovate[bot] <161757507+argoproj-renovate[bot]@users.noreply.github.com>
2025-09-17 13:13:51 -04:00
Siva Sathwik Kommi
dbef22c843 fix: Fixed inconsistent alignment of titles and headings in status panel (#23160)
Signed-off-by: sivasath16 <sivasathwik.kommi@gmail.com>
Signed-off-by: Siva Sathwik Kommi <sivasathwik.kommi@gmail.com>
2025-09-17 21:33:02 +05:30
Michael Crenshaw
47142b89f4 chore(ci): enable Renovate (#24602)
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
2025-09-17 16:02:30 +00:00
José Maia
98a22612dd docs: Delete dangling word in Source Hydrator docs (#24601)
Signed-off-by: José Maia <josecbmaia@hotmail.com>
2025-09-17 11:34:22 -04:00
Blake Pettersson
6cce4b29b9 chore(ci): don't run renovate on forks (#24600)
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
2025-09-17 09:43:13 -04:00
Revital Barletz
9087ad7282 docs: fix inconsistency in application health example (#24585)
Signed-off-by: Revital Barletz <Revital.barletz@octopus.com>
Signed-off-by: Dan Garfield <dan@codefresh.io>
Co-authored-by: Dan Garfield <dan@codefresh.io>
2025-09-17 09:08:13 +00:00
Alexandre Gaudreault
c377101491 fix(appset): progressive sync loop when application has sync errors (#24507)
Signed-off-by: Alexandre Gaudreault <alexandre_gaudreault@intuit.com>
2025-09-16 17:56:55 +00:00
Papapetrou Patroklos
1d13ebc372 chore: bumps redis version to 8.2.1 (#24523)
Signed-off-by: Patroklos Papapetrou <ppapapetrou76@gmail.com>
2025-09-16 09:46:25 -04:00
41 changed files with 2139 additions and 2434 deletions

View File

@@ -14,7 +14,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.0'
GOLANG_VERSION: '1.25.1'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -308,7 +308,7 @@ jobs:
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
with:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
node-version: '22.19.0'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
@@ -496,7 +496,7 @@ jobs:
run: |
docker pull ghcr.io/dexidp/dex:v2.43.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.2.7-alpine
docker pull redis:8.2.1-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist

View File

@@ -53,7 +53,7 @@ jobs:
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.25.1
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
@@ -70,7 +70,7 @@ jobs:
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.25.1
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:

View File

@@ -11,7 +11,7 @@ permissions: {}
env:
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.0' # Note: go-version must also be set in job argocd-image.with.go-version
GOLANG_VERSION: '1.25.1' # Note: go-version must also be set in job argocd-image.with.go-version
jobs:
argocd-image:
@@ -25,7 +25,7 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.0
go-version: 1.25.1
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:

View File

@@ -10,6 +10,7 @@ permissions:
jobs:
renovate:
runs-on: ubuntu-latest
if: github.repository == 'argoproj/argo-cd'
steps:
- name: Get token
id: get_token

View File

@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS builder
FROM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 AS builder
WORKDIR /tmp
@@ -85,7 +85,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.11.1@sha256:9a25b5a6f9a90218b73a62205f111e71de5e4289aee952b4dd7e86f7498f2544 AS argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd

View File

@@ -1,4 +1,4 @@
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6
FROM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -46,6 +47,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/argo-cd/v3/applicationset/controllers/template"
"github.com/argoproj/argo-cd/v3/applicationset/generators"
"github.com/argoproj/argo-cd/v3/applicationset/metrics"
@@ -226,8 +229,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, fmt.Errorf("failed to get update resources status for application set: %w", err)
}
// appMap is a name->app collection of Applications in this ApplicationSet.
appMap := map[string]argov1alpha1.Application{}
// appSyncMap tracks which apps will be synced during this reconciliation.
appSyncMap := map[string]bool{}
@@ -241,25 +242,13 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
}
} else if isRollingSyncStrategy(&applicationSetInfo) {
// The appset uses progressive sync with `RollingSync` strategy
for _, app := range currentApplications {
appMap[app.Name] = app
}
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications, appMap)
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
}
}
}
var validApps []argov1alpha1.Application
for i := range generatedApplications {
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
validApps = append(validApps, generatedApplications[i])
}
}
if len(validateErrors) > 0 {
errorApps := make([]string, 0, len(validateErrors))
for key := range validateErrors {
@@ -287,13 +276,25 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
)
}
var validApps []argov1alpha1.Application
for i := range generatedApplications {
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
validApps = append(validApps, generatedApplications[i])
}
}
if r.EnableProgressiveSyncs {
// trigger appropriate application syncs if RollingSync strategy is enabled
if progressiveSyncsRollingSyncStrategyEnabled(&applicationSetInfo) {
validApps = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
validApps = r.syncDesiredApplications(logCtx, &applicationSetInfo, appSyncMap, validApps)
}
}
// Sort apps by name so they are updated/created in the same order, and condition errors are the same
sort.Slice(validApps, func(i, j int) bool {
return validApps[i].Name < validApps[j].Name
})
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowUpdate() {
err = r.createOrUpdateInCluster(ctx, logCtx, applicationSetInfo, validApps)
if err != nil {
@@ -325,6 +326,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
// Delete the generatedApplications instead of the validApps because we want to be able to delete applications in error/invalid state
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, generatedApplications)
if err != nil {
_ = r.setApplicationSetStatusCondition(ctx,
@@ -931,7 +933,7 @@ func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx conte
return nil
}
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application) (map[string]bool, error) {
appDependencyList, appStepMap := r.buildAppDependencyList(logCtx, appset, desiredApplications)
_, err := r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
@@ -940,21 +942,21 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
}
logCtx.Infof("ApplicationSet %v step list:", appset.Name)
for i, step := range appDependencyList {
logCtx.Infof("step %v: %+v", i+1, step)
for stepIndex, applicationNames := range appDependencyList {
logCtx.Infof("step %v: %+v", stepIndex+1, applicationNames)
}
appSyncMap := r.buildAppSyncMap(appset, appDependencyList, appMap)
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
appsToSync := r.getAppsToSync(appset, appDependencyList, applications)
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appsToSync)
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap)
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appsToSync, appStepMap)
if err != nil {
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
}
_ = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
return appSyncMap, nil
return appsToSync, nil
}
// this list tracks which Applications belong to each RollingUpdate step
@@ -1028,55 +1030,53 @@ func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov
return valueMatched
}
// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
func (r *ApplicationSetReconciler) buildAppSyncMap(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) map[string]bool {
// getAppsToSync returns a Map of Applications that should be synced in this progressive sync wave
func (r *ApplicationSetReconciler) getAppsToSync(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, currentApplications []argov1alpha1.Application) map[string]bool {
appSyncMap := map[string]bool{}
syncEnabled := true
currentAppsMap := map[string]bool{}
// healthy stages and the first non-healthy stage should have sync enabled
// every stage after should have sync disabled
for _, app := range currentApplications {
currentAppsMap[app.Name] = true
}
for i := range appDependencyList {
for stepIndex := range appDependencyList {
// set the syncEnabled boolean for every Application in the current step
for _, appName := range appDependencyList[i] {
appSyncMap[appName] = syncEnabled
for _, appName := range appDependencyList[stepIndex] {
appSyncMap[appName] = true
}
// detect if we need to halt before progressing to the next step
for _, appName := range appDependencyList[i] {
// evaluate if we need to sync next waves
syncNextWave := true
for _, appName := range appDependencyList[stepIndex] {
// Check if application is created and managed by this AppSet, if it is not created yet, we cannot progress
if _, ok := currentAppsMap[appName]; !ok {
syncNextWave = false
break
}
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName)
if idx == -1 {
// no Application status found, likely because the Application is being newly created
syncEnabled = false
// No Application status found, likely because the Application is being newly created
// This means this wave is not yet completed
syncNextWave = false
break
}
appStatus := applicationSet.Status.ApplicationStatus[idx]
app, ok := appMap[appName]
if !ok {
// application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
syncEnabled = false
break
}
syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
if !syncEnabled {
if appStatus.Status != argov1alpha1.ProgressiveSyncHealthy {
// At least one application in this wave is not yet healthy. We cannot proceed to the next wave
syncNextWave = false
break
}
}
if !syncNextWave {
break
}
}
return appSyncMap
}
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {
if progressiveSyncsRollingSyncStrategyEnabled(appset) {
// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
return isApplicationHealthy(app) && appStatus.Status == "Healthy"
}
return true
}
func isRollingSyncStrategy(appset *argov1alpha1.ApplicationSet) bool {
// It's only RollingSync if the type specifically sets it
return appset.Spec.Strategy != nil && appset.Spec.Strategy.Type == "RollingSync" && appset.Spec.Strategy.RollingSync != nil
@@ -1087,29 +1087,21 @@ func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.Application
return isRollingSyncStrategy(appset) && len(appset.Spec.Strategy.RollingSync.Steps) > 0
}
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
}
func isApplicationHealthy(app argov1alpha1.Application) bool {
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") {
return true
func isApplicationWithError(app argov1alpha1.Application) bool {
for _, condition := range app.Status.Conditions {
if condition.Type == argov1alpha1.ApplicationConditionInvalidSpecError {
return true
}
if condition.Type == argov1alpha1.ApplicationConditionUnknownError {
return true
}
}
return false
}
func statusStrings(app argov1alpha1.Application) (string, string, string) {
healthStatusString := string(app.Status.Health.Status)
syncStatusString := string(app.Status.Sync.Status)
operationPhaseString := ""
if app.Status.OperationState != nil {
operationPhaseString = string(app.Status.OperationState.Phase)
}
return healthStatusString, syncStatusString, operationPhaseString
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
}
func getAppStep(appName string, appStepMap map[string]int) int {
@@ -1128,81 +1120,112 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))
for _, app := range applications {
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)
appHealthStatus := app.Status.Health.Status
appSyncStatus := app.Status.Sync.Status
currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)
if idx == -1 {
// AppStatus not found, set default status of "Waiting"
currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
Application: app.Name,
TargetRevisions: app.Status.GetRevisions(),
LastTransitionTime: &now,
Message: "No Application status found, defaulting status to Waiting.",
Status: "Waiting",
Message: "No Application status found, defaulting status to Waiting",
Status: argov1alpha1.ProgressiveSyncWaiting,
Step: strconv.Itoa(getAppStep(app.Name, appStepMap)),
}
} else {
// we have an existing AppStatus
currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
}
}
statusLogCtx := logCtx.WithFields(log.Fields{
"app.name": currentAppStatus.Application,
"app.health": appHealthStatus,
"app.sync": appSyncStatus,
"status.status": currentAppStatus.Status,
"status.message": currentAppStatus.Message,
"status.step": currentAppStatus.Step,
"status.targetRevisions": strings.Join(currentAppStatus.TargetRevisions, ","),
})
newAppStatus := currentAppStatus.DeepCopy()
newAppStatus.Step = strconv.Itoa(getAppStep(newAppStatus.Application, appStepMap))
if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
currentAppStatus.Status = "Waiting"
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
// A new version is available in the application and we need to re-sync the application
newAppStatus.TargetRevisions = app.Status.GetRevisions()
newAppStatus.Message = "Application has pending changes, setting status to Waiting"
newAppStatus.Status = argov1alpha1.ProgressiveSyncWaiting
newAppStatus.LastTransitionTime = &now
}
appOutdated := false
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
appOutdated = syncStatusString == "OutOfSync"
}
if newAppStatus.Status == argov1alpha1.ProgressiveSyncWaiting {
// App has changed to waiting because the TargetRevisions changed or it is a new selected app
// This does not mean we should always sync the app. The app may not be OutOfSync
// and may not require a sync if it does not have differences.
if appSyncStatus == argov1alpha1.SyncStatusCodeSynced {
if app.Status.Health.Status == health.HealthStatusHealthy {
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncHealthy
newAppStatus.Message = "Application resource has synced, updating status to Healthy"
} else {
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
newAppStatus.Message = "Application resource has synced, updating status to Progressing"
}
}
} else {
// The target revision is the same, so we need to evaluate the current revision progress
if currentAppStatus.Status == argov1alpha1.ProgressiveSyncPending {
// No need to evaluate status health further if the application did not change since our last transition
if app.Status.ReconciledAt == nil || (newAppStatus.LastTransitionTime != nil && app.Status.ReconciledAt.After(newAppStatus.LastTransitionTime.Time)) {
// Validate that at least one sync was triggered after the pending transition time
if app.Status.OperationState != nil && app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
statusLogCtx = statusLogCtx.WithField("app.operation", app.Status.OperationState.Phase)
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
logCtx.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}
switch {
case app.Status.OperationState.Phase.Successful():
newAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing"
case app.Status.OperationState.Phase.Completed():
newAppStatus.Message = "Application resource completed a sync, updating status from Pending to Progressing"
default:
// If a sync fails or has errors, the Application should be configured with retry. It is not the appset's job to retry failed syncs
newAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing"
}
} else if isApplicationWithError(app) {
// Validate if the application has errors preventing it to be reconciled and perform syncs
// If it does, we move it to progressing.
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncProgressing
newAppStatus.Message = "Application resource has error and cannot sync, updating status to Progressing"
}
}
}
if currentAppStatus.Status == "Pending" {
if !appOutdated && operationPhaseString == "Succeeded" {
logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
if currentAppStatus.Status == argov1alpha1.ProgressiveSyncProgressing {
// If the status has reached progressing, we know a sync has been triggered. No matter the result of that operation,
// we want the app to reach the Healthy state for the current revision.
if appHealthStatus == health.HealthStatusHealthy && appSyncStatus == argov1alpha1.SyncStatusCodeSynced {
newAppStatus.LastTransitionTime = &now
newAppStatus.Status = argov1alpha1.ProgressiveSyncHealthy
newAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy"
}
}
}
if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
logCtx.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
if newAppStatus.LastTransitionTime == &now {
statusLogCtx.WithFields(log.Fields{
"new_status.status": newAppStatus.Status,
"new_status.message": newAppStatus.Message,
"new_status.step": newAppStatus.Step,
"new_status.targetRevisions": strings.Join(newAppStatus.TargetRevisions, ","),
}).Info("Progressive sync application changed status")
}
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
logCtx.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
}
appStatuses = append(appStatuses, currentAppStatus)
appStatuses = append(appStatuses, *newAppStatus)
}
err := r.setAppSetApplicationStatus(ctx, logCtx, applicationSet, appStatuses)
@@ -1214,7 +1237,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
}
// check Applications that are in Waiting status and promote them to Pending if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appsToSync map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
@@ -1230,12 +1253,20 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
for _, appStatus := range applicationSet.Status.ApplicationStatus {
totalCountMap[appStepMap[appStatus.Application]]++
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
if appStatus.Status == argov1alpha1.ProgressiveSyncPending || appStatus.Status == argov1alpha1.ProgressiveSyncProgressing {
updateCountMap[appStepMap[appStatus.Application]]++
}
}
for _, appStatus := range applicationSet.Status.ApplicationStatus {
statusLogCtx := logCtx.WithFields(log.Fields{
"app.name": appStatus.Application,
"status.status": appStatus.Status,
"status.message": appStatus.Message,
"status.step": appStatus.Step,
"status.targetRevisions": strings.Join(appStatus.TargetRevisions, ","),
})
maxUpdateAllowed := true
maxUpdate := &intstr.IntOrString{}
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
@@ -1246,7 +1277,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if maxUpdate != nil {
maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
if err != nil {
logCtx.Warnf("AppSet '%v' has a invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
statusLogCtx.Warnf("AppSet has an invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", maxUpdate, err)
}
// ensure that percentage values greater than 0% always result in at least 1 Application being selected
@@ -1256,16 +1287,21 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap), applicationSet.Name)
statusLogCtx.Infof("Application is not allowed to update yet, %v/%v Applications already updating in step %v", updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap))
}
}
if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
logCtx.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
if appStatus.Status == argov1alpha1.ProgressiveSyncWaiting && appsToSync[appStatus.Application] && maxUpdateAllowed {
appStatus.LastTransitionTime = &now
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
appStatus.Step = strconv.Itoa(getAppStep(appStatus.Application, appStepMap))
appStatus.Status = argov1alpha1.ProgressiveSyncPending
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing"
statusLogCtx.WithFields(log.Fields{
"new_status.status": appStatus.Status,
"new_status.message": appStatus.Message,
"new_status.step": appStatus.Step,
"new_status.targetRevisions": strings.Join(appStatus.TargetRevisions, ","),
}).Info("Progressive sync application changed status")
updateCountMap[appStepMap[appStatus.Application]]++
}
@@ -1290,9 +1326,9 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditio
completedWaves := map[string]bool{}
for _, appStatus := range applicationSet.Status.ApplicationStatus {
if v, ok := completedWaves[appStatus.Step]; !ok {
completedWaves[appStatus.Step] = appStatus.Status == "Healthy"
completedWaves[appStatus.Step] = appStatus.Status == argov1alpha1.ProgressiveSyncHealthy
} else {
completedWaves[appStatus.Step] = v && appStatus.Status == "Healthy"
completedWaves[appStatus.Step] = v && appStatus.Status == argov1alpha1.ProgressiveSyncHealthy
}
}
@@ -1507,30 +1543,31 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
return nil
}
func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) []argov1alpha1.Application {
func (r *ApplicationSetReconciler) syncDesiredApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appsToSync map[string]bool, desiredApplications []argov1alpha1.Application) []argov1alpha1.Application {
rolloutApps := []argov1alpha1.Application{}
for i := range validApps {
for i := range desiredApplications {
pruneEnabled := false
// ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead
if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.IsAutomatedSyncEnabled() {
pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune
validApps[i].Spec.SyncPolicy.Automated = nil
if desiredApplications[i].Spec.SyncPolicy != nil && desiredApplications[i].Spec.SyncPolicy.IsAutomatedSyncEnabled() {
pruneEnabled = desiredApplications[i].Spec.SyncPolicy.Automated.Prune
desiredApplications[i].Spec.SyncPolicy.Automated.Enabled = ptr.To(false)
}
appSetStatusPending := false
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name)
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" {
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, desiredApplications[i].Name)
if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == argov1alpha1.ProgressiveSyncPending {
// only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate
appSetStatusPending = true
}
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
validApps[i] = syncApplication(validApps[i], pruneEnabled)
// check appsToSync to determine which Applications are ready to be updated and which should be skipped
if appsToSync[desiredApplications[i].Name] && appSetStatusPending {
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", desiredApplications[i].Name, pruneEnabled)
desiredApplications[i] = syncApplication(desiredApplications[i], pruneEnabled)
}
rolloutApps = append(rolloutApps, validApps[i])
rolloutApps = append(rolloutApps, desiredApplications[i])
}
return rolloutApps
}

File diff suppressed because it is too large Load Diff

2
assets/swagger.json generated
View File

@@ -7077,7 +7077,7 @@
},
"status": {
"type": "string",
"title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
"title": "Status contains the AppSet's perceived status of the managed Application resource"
},
"step": {
"type": "string",

View File

@@ -1,18 +1,21 @@
# Progressive Syncs
!!! warning "Alpha Feature (Since v2.6.0)"
This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications
This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications
owned by an ApplicationSet resource. It may be removed in future releases or modified in backwards-incompatible ways.
## Use Cases
The Progressive Syncs feature set is intended to be light and flexible. The feature only interacts with the health of managed Applications. It is not intended to support direct integrations with other Rollout controllers (such as the native ReplicaSet controller or Argo Rollouts).
* Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage.
* Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported.
* [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
- Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage.
- Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported.
- [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
## Enabling Progressive Syncs
As an experimental feature, progressive syncs must be explicitly enabled, in one of these ways.
1. Pass `--enable-progressive-syncs` to the ApplicationSet controller args.
@@ -23,17 +26,18 @@ As an experimental feature, progressive syncs must be explicitly enabled, in one
ApplicationSet strategies control both how applications are created (or updated) and deleted. These operations are configured using two separate fields:
* **Creation Strategy** (`type` field): Controls application creation and updates
* **Deletion Strategy** (`deletionOrder` field): Controls application deletion order
- **Creation Strategy** (`type` field): Controls application creation and updates
- **Deletion Strategy** (`deletionOrder` field): Controls application deletion order
### Creation Strategies
The `type` field controls how applications are created and updated. Available values:
* **AllAtOnce** (default)
* **RollingSync**
- **AllAtOnce** (default)
- **RollingSync**
#### AllAtOnce
This default Application update behavior is unchanged from the original ApplicationSet implementation.
All Applications managed by the ApplicationSet resource are updated simultaneously when the ApplicationSet is updated.
@@ -41,25 +45,25 @@ All Applications managed by the ApplicationSet resource are updated simultaneous
```yaml
spec:
strategy:
type: AllAtOnce # explicit, but this is the default
type: AllAtOnce # explicit, but this is the default
```
#### RollingSync
This update strategy allows you to group Applications by labels present on the generated Application resources.
When the ApplicationSet changes, the changes will be applied to each group of Application resources sequentially.
* Application groups are selected using their labels and `matchExpressions`.
* All `matchExpressions` must be true for an Application to be selected (multiple expressions match with AND behavior).
* The `In` and `NotIn` operators must match at least one value to be considered true (OR behavior).
* The `NotIn` operator has priority in the event that both a `NotIn` and `In` operator produce a match.
* All Applications in each group must become Healthy before the ApplicationSet controller will proceed to update the next group of Applications.
* The number of simultaneous Application updates in a group will not exceed its `maxUpdate` parameter (default is 100%, unbounded).
* RollingSync will capture external changes outside the ApplicationSet resource, since it relies on watching the OutOfSync status of the managed Applications.
* RollingSync will force all generated Applications to have autosync disabled. Warnings are printed in the applicationset-controller logs for any Application specs with an automated syncPolicy enabled.
* Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI.
* When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings.
* If an Application is considered "Pending" for `applicationsetcontroller.default.application.progressing.timeout` seconds, the Application is automatically moved to Healthy status (default 300).
* If an Application is not selected in any step, it will be excluded from the rolling sync and needs to be manually synced through the CLI or UI.
- Application groups are selected using their labels and `matchExpressions`.
- All `matchExpressions` must be true for an Application to be selected (multiple expressions match with AND behavior).
- The `In` and `NotIn` operators must match at least one value to be considered true (OR behavior).
- The `NotIn` operator has priority in the event that both a `NotIn` and `In` operator produce a match.
- All Applications in each group must become Healthy before the ApplicationSet controller will proceed to update the next group of Applications.
- The number of simultaneous Application updates in a group will not exceed its `maxUpdate` parameter (default is 100%, unbounded).
- RollingSync will capture external changes outside the ApplicationSet resource, since it relies on watching the OutOfSync status of the managed Applications.
- RollingSync will force all generated Applications to have autosync disabled. Warnings are printed in the applicationset-controller logs for any Application specs with an automated syncPolicy enabled.
- Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI.
- When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings.
- If an Application is not selected in any step, it will be excluded from the rolling sync and needs to be manually synced through the CLI or UI.
```yaml
spec:
@@ -84,25 +88,28 @@ spec:
The `deletionOrder` field controls the order in which applications are deleted when they are removed from the ApplicationSet. Available values:
* **AllAtOnce** (default)
* **Reverse**
- **AllAtOnce** (default)
- **Reverse**
#### AllAtOnce Deletion
This is the default behavior where all applications that need to be deleted are removed simultaneously. This works with both `AllAtOnce` and `RollingSync` creation strategies.
```yaml
spec:
strategy:
type: RollingSync # or AllAtOnce
deletionOrder: AllAtOnce # explicit, but this is the default
type: RollingSync # or AllAtOnce
deletionOrder: AllAtOnce # explicit, but this is the default
```
#### Reverse Deletion
When using `deletionOrder: Reverse` with RollingSync strategy, applications are deleted in reverse order of the steps defined in `rollingSync.steps`. This ensures that applications deployed in later steps are deleted before applications deployed in earlier steps.
This strategy is particularly useful when you need to tear down dependent services in the particular sequence.
**Requirements for Reverse deletion:**
- Must be used with `type: RollingSync`
- Must be used with `type: RollingSync`
- Requires `rollingSync.steps` to be defined
- Applications are deleted in reverse order of step sequence
@@ -119,28 +126,30 @@ spec:
- key: envLabel
operator: In
values:
- env-dev # Step 1: Created first, deleted last
- env-dev # Step 1: Created first, deleted last
- matchExpressions:
- key: envLabel
- key: envLabel
operator: In
values:
- env-prod # Step 2: Created second, deleted first
- env-prod # Step 2: Created second, deleted first
```
In this example, when applications are deleted:
1. `env-prod` applications (Step 2) are deleted first
2. `env-dev` applications (Step 1) are deleted second
This deletion order is useful for scenarios where you need to tear down dependent services in the correct sequence, such as deleting frontend services before backend dependencies.
#### Example
The following example illustrates how to stage a progressive sync over Applications with explicitly configured environment labels.
Once a change is pushed, the following will happen in order.
* All `env-dev` Applications will be updated simultaneously.
* The rollout will wait for all `env-qa` Applications to be manually synced via the `argocd` CLI or by clicking the Sync button in the UI.
* 10% of all `env-prod` Applications will be updated at a time until all `env-prod` Applications have been updated.
- All `env-dev` Applications will be updated simultaneously.
- The rollout will wait for all `env-qa` Applications to be manually synced via the `argocd` CLI or by clicking the Sync button in the UI.
- 10% of all `env-prod` Applications will be updated at a time until all `env-prod` Applications have been updated.
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -149,20 +158,20 @@ metadata:
name: guestbook
spec:
generators:
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
env: env-dev
- cluster: engineering-qa
url: https://2.4.6.8
env: env-qa
- cluster: engineering-prod
url: https://9.8.7.6/
env: env-prod
- list:
elements:
- cluster: engineering-dev
url: https://1.2.3.4
env: env-dev
- cluster: engineering-qa
url: https://2.4.6.8
env: env-qa
- cluster: engineering-prod
url: https://9.8.7.6/
env: env-prod
strategy:
type: RollingSync
deletionOrder: Reverse # Applications will be deleted in reverse order of steps
deletionOrder: Reverse # Applications will be deleted in reverse order of steps
rollingSync:
steps:
- matchExpressions:
@@ -176,15 +185,15 @@ spec:
operator: In
values:
- env-qa
maxUpdate: 0 # if 0, no matched applications will be updated
maxUpdate: 0 # if 0, no matched applications will be updated
- matchExpressions:
- key: envLabel
operator: In
values:
- env-prod
maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%)
maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%)
goTemplate: true
goTemplateOptions: ["missingkey=error"]
goTemplateOptions: ['missingkey=error']
template:
metadata:
name: '{{.cluster}}-guestbook'

View File

@@ -223,10 +223,10 @@ The following resources have Go-based health checks:
## Health Checks
An Argo CD App's health is inferred from the health of its immediate child resources (the resources represented in
source control). The App health will be the worst health of its immediate child sources. The priority of most to least
healthy statuses is: `Healthy`, `Suspended`, `Progressing`, `Missing`, `Degraded`, `Unknown`. So, for example, if an App
has a `Missing` resource and a `Degraded` resource, the App's health will be `Missing`.
Argo CD App health is inferred from the health of its immediate child resources as represented in the application source.
The App health will be the **worst health of its immediate child resources**, based on the following priority (from most to least healthy):
**Healthy, Suspended, Progressing, Missing, Degraded, Unknown.**
For example, if an App has a Missing resource and a Degraded resource, the App's health will be **Degraded**.
But the health of a resource is not inherited from child resources - it is calculated using only information about the
resource itself. A resource's status field may or may not contain information about the health of a child resource, and

View File

@@ -5,5 +5,5 @@ mkdocs-material==7.1.8
markdown_include==0.8.1
pygments==2.19.2
jinja2==3.1.6
markdown==3.8.2
markdown==3.9
pymdown-extensions==10.16.1

View File

@@ -195,7 +195,7 @@ git commit -m "Bump image to v1.2.3" \
```
!!!note Newlines are not allowed
The commit trailers must not contain newlines. The
The commit trailers must not contain newlines.
So the full CI script might look something like this:

4
go.mod
View File

@@ -6,7 +6,7 @@ require (
code.gitea.io/sdk/gitea v0.22.0
dario.cat/mergo v1.0.2
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
github.com/Azure/kubelogin v0.2.10
github.com/Masterminds/semver/v3 v3.4.0
github.com/Masterminds/sprig/v3 v3.3.0
@@ -134,7 +134,7 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect

8
go.sum
View File

@@ -46,8 +46,8 @@ github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs=
github.com/42wim/httpsig v1.2.3/go.mod h1:nZq9OlYKDrUBhptd77IHx4/sZZD+IxTBADvAPI9G/EM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
@@ -74,8 +74,8 @@ github.com/Azure/kubelogin v0.2.10 h1:6CBXJt/RtnTPI1R1E4cfEdL+BnCKMuywtglX//FZPD
github.com/Azure/kubelogin v0.2.10/go.mod h1:JtR+7h3NHAwQPZ+CagUZ+F1Uk3/JU0eRFwpESSnRNGU=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=

View File

@@ -10,7 +10,7 @@ PATH="${INSTALL_PATH}:${PATH}"
[ -d "$INSTALL_PATH" ] || mkdir -p "$INSTALL_PATH"
# renovate: datasource=github-releases depName=gotestyourself/gotestsum packageName=gotestyourself/gotestsum
GOTESTSUM_VERSION=1.12.3
GOTESTSUM_VERSION=1.13.0
OS=$(go env GOOS)
ARCH=$(go env GOARCH)

View File

@@ -40,7 +40,7 @@ spec:
serviceAccountName: argocd-redis
containers:
- name: redis
image: redis:7.2.7-alpine
image: redis:8.2.1-alpine
imagePullPolicy: Always
args:
- "--save"

View File

@@ -25076,7 +25076,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: Always
name: redis
ports:

View File

@@ -24910,7 +24910,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: Always
name: redis
ports:

View File

@@ -1,6 +1,6 @@
dependencies:
- name: redis-ha
repository: https://dandydeveloper.github.io/charts
version: 4.33.2
digest: sha256:dfb01cb345d8e0c3cf41294ca9b7eae8272083f3ed165cf58d35e492403cf0aa
generated: "2025-03-04T08:25:54.199096-08:00"
version: 4.34.11
digest: sha256:65651a2e28ac28852dd75e642e42ec06557d9bc14949c62d817dec315812346e
generated: "2025-09-16T11:02:38.114394+03:00"

View File

@@ -1,4 +1,4 @@
dependencies:
- name: redis-ha
version: 4.33.2
version: 4.34.11
repository: https://dandydeveloper.github.io/charts

View File

@@ -9,7 +9,7 @@ metadata:
labels:
heritage: Helm
release: argocd
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
app: argocd-redis-ha
secrets:
- name: argocd-redis
@@ -23,7 +23,7 @@ metadata:
labels:
heritage: Helm
release: argocd
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
app: argocd-redis-ha
---
# Source: redis-ha/charts/redis-ha/templates/redis-ha-configmap.yaml
@@ -35,7 +35,7 @@ metadata:
labels:
heritage: Helm
release: argocd
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
app: argocd-redis-ha
data:
redis.conf: |
@@ -606,12 +606,28 @@ data:
if [ "$MASTER" = "$ANNOUNCE_IP" ]; then
redis_role
if [ "$ROLE" != "master" ]; then
reinit
echo "waiting for redis to become master"
sleep 10
identify_master
redis_role
echo "Redis role is $ROLE, expected role is master. No need to reinitialize."
if [ "$ROLE" != "master" ]; then
echo "Redis role is $ROLE, expected role is master, reinitializing"
reinit
fi
fi
elif [ "${MASTER}" ]; then
identify_redis_master
if [ "$REDIS_MASTER" != "$MASTER" ]; then
reinit
echo "Redis master and local master are not the same. waiting."
sleep 10
identify_master
identify_redis_master
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}. No need to reinitialize."
if [ "${REDIS_MASTER}" != "${MASTER}" ]; then
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}, reinitializing"
reinit
fi
fi
fi
done
@@ -779,7 +795,7 @@ metadata:
labels:
heritage: Helm
release: argocd
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
app: argocd-redis-ha
data:
redis_liveness.sh: |
@@ -855,7 +871,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
rules:
- apiGroups:
- ""
@@ -874,8 +890,8 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
component: argocd-redis-ha-haproxy
chart: redis-ha-4.34.11
component: haproxy
rules:
- apiGroups:
- ""
@@ -894,7 +910,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
subjects:
- kind: ServiceAccount
name: argocd-redis-ha
@@ -913,8 +929,8 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
component: argocd-redis-ha-haproxy
chart: redis-ha-4.34.11
component: haproxy
subjects:
- kind: ServiceAccount
name: argocd-redis-ha-haproxy
@@ -933,7 +949,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
annotations:
spec:
publishNotReadyAddresses: true
@@ -962,7 +978,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
annotations:
spec:
publishNotReadyAddresses: true
@@ -991,7 +1007,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
annotations:
spec:
publishNotReadyAddresses: true
@@ -1020,7 +1036,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
annotations:
spec:
type: ClusterIP
@@ -1048,8 +1064,8 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
component: argocd-redis-ha-haproxy
chart: redis-ha-4.34.11
component: haproxy
annotations:
spec:
type: ClusterIP
@@ -1076,7 +1092,8 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
component: haproxy
spec:
strategy:
type: RollingUpdate
@@ -1086,12 +1103,14 @@ spec:
matchLabels:
app: redis-ha-haproxy
release: argocd
component: haproxy
template:
metadata:
name: argocd-redis-ha-haproxy
labels:
app: redis-ha-haproxy
release: argocd
component: haproxy
annotations:
prometheus.io/port: "9101"
prometheus.io/scrape: "true"
@@ -1109,7 +1128,7 @@ spec:
nodeSelector:
{}
tolerations:
null
[]
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -1117,6 +1136,7 @@ spec:
matchLabels:
app: redis-ha-haproxy
release: argocd
component: haproxy
topologyKey: kubernetes.io/hostname
initContainers:
- name: config-init
@@ -1210,7 +1230,7 @@ metadata:
app: redis-ha
heritage: "Helm"
release: "argocd"
chart: redis-ha-4.33.2
chart: redis-ha-4.34.11
annotations:
{}
spec:
@@ -1226,7 +1246,7 @@ spec:
template:
metadata:
annotations:
checksum/init-config: bd30e83dfdad9912b6c1cc32a8c26d7d01429a0730f5ee7af380fb593e874d54
checksum/init-config: fd74f7d84e39b3f6eac1d7ce5deb0083e58f218376faf363343d91a0fb4f2563
labels:
release: argocd
app: redis-ha
@@ -1250,7 +1270,7 @@ spec:
automountServiceAccountToken: false
initContainers:
- name: config-init
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
resources:
{}
@@ -1290,7 +1310,7 @@ spec:
containers:
- name: redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
command:
- redis-server
@@ -1334,11 +1354,11 @@ spec:
- -c
- /health/redis_readiness.sh
startupProbe:
initialDelaySeconds: 5
periodSeconds: 10
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
failureThreshold: 5
exec:
command:
- sh
@@ -1364,7 +1384,7 @@ spec:
- /bin/sh
- /readonly-config/trigger-failover-if-master.sh
- name: sentinel
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
command:
- redis-sentinel
@@ -1437,7 +1457,7 @@ spec:
- sleep 30; redis-cli -p 26379 sentinel reset argocd
- name: split-brain-fix
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
command:
- sh

View File

@@ -27,7 +27,7 @@ redis-ha:
serviceAccount:
automountToken: true
image:
tag: 7.2.7-alpine
tag: 8.2.1-alpine
sentinel:
bind: '0.0.0.0'
lifecycle:

View File

@@ -25203,12 +25203,28 @@ data:
if [ "$MASTER" = "$ANNOUNCE_IP" ]; then
redis_role
if [ "$ROLE" != "master" ]; then
reinit
echo "waiting for redis to become master"
sleep 10
identify_master
redis_role
echo "Redis role is $ROLE, expected role is master. No need to reinitialize."
if [ "$ROLE" != "master" ]; then
echo "Redis role is $ROLE, expected role is master, reinitializing"
reinit
fi
fi
elif [ "${MASTER}" ]; then
identify_redis_master
if [ "$REDIS_MASTER" != "$MASTER" ]; then
reinit
echo "Redis master and local master are not the same. waiting."
sleep 10
identify_master
identify_redis_master
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}. No need to reinitialize."
if [ "${REDIS_MASTER}" != "${MASTER}" ]; then
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}, reinitializing"
reinit
fi
fi
fi
done
@@ -27887,7 +27903,7 @@ spec:
template:
metadata:
annotations:
checksum/init-config: bd30e83dfdad9912b6c1cc32a8c26d7d01429a0730f5ee7af380fb593e874d54
checksum/init-config: fd74f7d84e39b3f6eac1d7ce5deb0083e58f218376faf363343d91a0fb4f2563
labels:
app.kubernetes.io/name: argocd-redis-ha
spec:
@@ -27910,7 +27926,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -27958,9 +27974,9 @@ spec:
- sh
- -c
- /health/redis_readiness.sh
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 15
volumeMounts:
@@ -27981,7 +27997,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@@ -28056,7 +28072,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: split-brain-fix
resources: {}
@@ -28091,7 +28107,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: config-init
securityContext:

View File

@@ -25194,12 +25194,28 @@ data:
if [ "$MASTER" = "$ANNOUNCE_IP" ]; then
redis_role
if [ "$ROLE" != "master" ]; then
reinit
echo "waiting for redis to become master"
sleep 10
identify_master
redis_role
echo "Redis role is $ROLE, expected role is master. No need to reinitialize."
if [ "$ROLE" != "master" ]; then
echo "Redis role is $ROLE, expected role is master, reinitializing"
reinit
fi
fi
elif [ "${MASTER}" ]; then
identify_redis_master
if [ "$REDIS_MASTER" != "$MASTER" ]; then
reinit
echo "Redis master and local master are not the same. waiting."
sleep 10
identify_master
identify_redis_master
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}. No need to reinitialize."
if [ "${REDIS_MASTER}" != "${MASTER}" ]; then
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}, reinitializing"
reinit
fi
fi
fi
done
@@ -27723,7 +27739,7 @@ spec:
template:
metadata:
annotations:
checksum/init-config: bd30e83dfdad9912b6c1cc32a8c26d7d01429a0730f5ee7af380fb593e874d54
checksum/init-config: fd74f7d84e39b3f6eac1d7ce5deb0083e58f218376faf363343d91a0fb4f2563
labels:
app.kubernetes.io/name: argocd-redis-ha
spec:
@@ -27746,7 +27762,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -27794,9 +27810,9 @@ spec:
- sh
- -c
- /health/redis_readiness.sh
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 15
volumeMounts:
@@ -27817,7 +27833,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@@ -27892,7 +27908,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: split-brain-fix
resources: {}
@@ -27927,7 +27943,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: config-init
securityContext:

View File

@@ -890,12 +890,28 @@ data:
if [ "$MASTER" = "$ANNOUNCE_IP" ]; then
redis_role
if [ "$ROLE" != "master" ]; then
reinit
echo "waiting for redis to become master"
sleep 10
identify_master
redis_role
echo "Redis role is $ROLE, expected role is master. No need to reinitialize."
if [ "$ROLE" != "master" ]; then
echo "Redis role is $ROLE, expected role is master, reinitializing"
reinit
fi
fi
elif [ "${MASTER}" ]; then
identify_redis_master
if [ "$REDIS_MASTER" != "$MASTER" ]; then
reinit
echo "Redis master and local master are not the same. waiting."
sleep 10
identify_master
identify_redis_master
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}. No need to reinitialize."
if [ "${REDIS_MASTER}" != "${MASTER}" ]; then
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}, reinitializing"
reinit
fi
fi
fi
done
@@ -3574,7 +3590,7 @@ spec:
template:
metadata:
annotations:
checksum/init-config: bd30e83dfdad9912b6c1cc32a8c26d7d01429a0730f5ee7af380fb593e874d54
checksum/init-config: fd74f7d84e39b3f6eac1d7ce5deb0083e58f218376faf363343d91a0fb4f2563
labels:
app.kubernetes.io/name: argocd-redis-ha
spec:
@@ -3597,7 +3613,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -3645,9 +3661,9 @@ spec:
- sh
- -c
- /health/redis_readiness.sh
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 15
volumeMounts:
@@ -3668,7 +3684,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@@ -3743,7 +3759,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: split-brain-fix
resources: {}
@@ -3778,7 +3794,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: config-init
securityContext:

View File

@@ -881,12 +881,28 @@ data:
if [ "$MASTER" = "$ANNOUNCE_IP" ]; then
redis_role
if [ "$ROLE" != "master" ]; then
reinit
echo "waiting for redis to become master"
sleep 10
identify_master
redis_role
echo "Redis role is $ROLE, expected role is master. No need to reinitialize."
if [ "$ROLE" != "master" ]; then
echo "Redis role is $ROLE, expected role is master, reinitializing"
reinit
fi
fi
elif [ "${MASTER}" ]; then
identify_redis_master
if [ "$REDIS_MASTER" != "$MASTER" ]; then
reinit
echo "Redis master and local master are not the same. waiting."
sleep 10
identify_master
identify_redis_master
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}. No need to reinitialize."
if [ "${REDIS_MASTER}" != "${MASTER}" ]; then
echo "Redis master is ${MASTER}, expected master is ${REDIS_MASTER}, reinitializing"
reinit
fi
fi
fi
done
@@ -3410,7 +3426,7 @@ spec:
template:
metadata:
annotations:
checksum/init-config: bd30e83dfdad9912b6c1cc32a8c26d7d01429a0730f5ee7af380fb593e874d54
checksum/init-config: fd74f7d84e39b3f6eac1d7ce5deb0083e58f218376faf363343d91a0fb4f2563
labels:
app.kubernetes.io/name: argocd-redis-ha
spec:
@@ -3433,7 +3449,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -3481,9 +3497,9 @@ spec:
- sh
- -c
- /health/redis_readiness.sh
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 15
volumeMounts:
@@ -3504,7 +3520,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@@ -3579,7 +3595,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: split-brain-fix
resources: {}
@@ -3614,7 +3630,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
name: config-init
securityContext:

View File

@@ -25741,7 +25741,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: Always
name: redis
ports:

View File

@@ -25575,7 +25575,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: Always
name: redis
ports:

View File

@@ -1428,7 +1428,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: Always
name: redis
ports:

View File

@@ -1262,7 +1262,7 @@ spec:
secretKeyRef:
key: auth
name: argocd-redis
image: public.ecr.aws/docker/library/redis:7.2.7-alpine
image: public.ecr.aws/docker/library/redis:8.2.1-alpine
imagePullPolicy: Always
name: redis
ports:

View File

@@ -867,6 +867,20 @@ const (
ApplicationSetReasonSyncApplicationError = "SyncApplicationError"
)
// Represents resource health status
type ProgressiveSyncStatusCode string
const (
// Indicates that an Application sync is waiting to be trigerred
ProgressiveSyncWaiting ProgressiveSyncStatusCode = "Waiting"
// Indicates that a sync has been trigerred, but the application did not report any status
ProgressiveSyncPending ProgressiveSyncStatusCode = "Pending"
// Indicates that the application has not yet reached an Healthy state in regards to the requested sync
ProgressiveSyncProgressing ProgressiveSyncStatusCode = "Progressing"
// Indicates that the application has reached an Healthy state in regards to the requested sync
ProgressiveSyncHealthy ProgressiveSyncStatusCode = "Healthy"
)
// ApplicationSetApplicationStatus contains details about each Application managed by the ApplicationSet
type ApplicationSetApplicationStatus struct {
// Application contains the name of the Application resource
@@ -875,8 +889,8 @@ type ApplicationSetApplicationStatus struct {
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,2,opt,name=lastTransitionTime"`
// Message contains human-readable message indicating details about the status
Message string `json:"message" protobuf:"bytes,3,opt,name=message"`
// Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)
Status string `json:"status" protobuf:"bytes,4,opt,name=status"`
// Status contains the AppSet's perceived status of the managed Application resource
Status ProgressiveSyncStatusCode `json:"status" protobuf:"bytes,4,opt,name=status"`
// Step tracks which step this Application should be updated in
Step string `json:"step" protobuf:"bytes,5,opt,name=step"`
// TargetRevision tracks the desired revisions the Application should be synced to.

File diff suppressed because it is too large Load Diff

View File

@@ -222,7 +222,7 @@ message ApplicationSetApplicationStatus {
// Message contains human-readable message indicating details about the status
optional string message = 3;
// Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)
// Status contains the AppSet's perceived status of the managed Application resource
optional string status = 4;
// Step tracks which step this Application should be updated in

3
renovate.json Normal file
View File

@@ -0,0 +1,3 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

View File

@@ -1,4 +1,4 @@
FROM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d as node
FROM docker.io/library/node:23.11.1@sha256:9a25b5a6f9a90218b73a62205f111e71de5e4289aee952b4dd7e86f7498f2544 as node
RUN apt-get update && apt-get install --no-install-recommends -y \
software-properties-common

View File

@@ -1 +1 @@
22.9.0
22.19.0

View File

@@ -60,7 +60,7 @@
"resolutions": {
"@types/react": "^16.9.3",
"@types/react-dom": "^16.8.2",
"normalize-url": "4.3.0",
"normalize-url": "4.5.1",
"rxjs": "6.6.7"
},
"devDependencies": {

View File

@@ -41,7 +41,6 @@
&__conditions {
display: flex;
max-width: 250px;
margin: auto;
flex-wrap: wrap;
justify-content: center;
line-height: 1.5em;
@@ -98,10 +97,6 @@
@include responsive-widths();
}
&:first-child {
margin-top: 7px;
}
&:not(:first-child) {
@include themify($themes) {
border-left: 1px solid themed('border');
@@ -139,7 +134,7 @@
&__item-value {
display: flex;
align-items: center;
align-items: baseline;
margin-bottom: 0.5em;
font-weight: 500;
padding: 2px 0px;

View File

@@ -39,15 +39,19 @@ interface SectionInfo {
}
const sectionLabel = (info: SectionInfo) => (
<label style={{fontSize: '12px', fontWeight: 600, color: ARGO_GRAY6_COLOR}}>
<label style={{display: 'flex', alignItems: 'flex-start', fontSize: '12px', fontWeight: 600, color: ARGO_GRAY6_COLOR, minHeight: '18px'}}>
{info.title}
{info.helpContent && <HelpIcon title={info.helpContent} />}
{info.helpContent && (
<span style={{marginLeft: '5px'}}>
<HelpIcon title={info.helpContent} />
</span>
)}
</label>
);
const sectionHeader = (info: SectionInfo, onClick?: () => any) => {
return (
<div style={{display: 'flex', alignItems: 'center', marginBottom: '0.5em'}}>
<div style={{display: 'flex', alignItems: 'center'}}>
{sectionLabel(info)}
{onClick && (
<button className='argo-button application-status-panel__more-button' onClick={onClick}>
@@ -156,7 +160,7 @@ export const ApplicationStatusPanel = ({application, showDiff, showOperation, sh
return (
<div className='application-status-panel row'>
<div className='application-status-panel__item'>
<div style={{lineHeight: '19.5px', marginBottom: '0.3em'}}>{sectionLabel({title: 'APP HEALTH', helpContent: 'The health status of your app'})}</div>
{sectionHeader({title: 'APP HEALTH', helpContent: 'The health status of your app'})}
<div className='application-status-panel__item-value'>
<HealthStatusIcon state={application.status.health} />
&nbsp;
@@ -282,7 +286,7 @@ export const ApplicationStatusPanel = ({application, showDiff, showOperation, sh
)}
{application.status.conditions && (
<div className={`application-status-panel__item`}>
{sectionLabel({title: 'APP CONDITIONS'})}
{sectionHeader({title: 'APP CONDITIONS'})}
<div className='application-status-panel__item-value application-status-panel__conditions' onClick={() => showConditions && showConditions()}>
{infos && (
<a className='info'>

View File

@@ -6839,10 +6839,10 @@ normalize-path@^3.0.0, normalize-path@~3.0.0:
resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65"
integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==
normalize-url@4.3.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.3.0.tgz#9c49e10fc1876aeb76dba88bf1b2b5d9fa57b2ee"
integrity sha512-0NLtR71o4k6GLP+mr6Ty34c5GA6CMoEsncKJxvQd8NzPxaHRJNnb5gZE8R1XF4CPIS7QPHLJ74IFszwtNVAHVQ==
normalize-url@4.5.1:
version "4.5.1"
resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.1.tgz#0dd90cf1288ee1d1313b87081c9a5932ee48518a"
integrity sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==
npm-run-path@^4.0.1:
version "4.0.1"