Compare commits


1 Commit

Author     SHA1        Message                Date
Henry Liu  095eef49d9  sibling of 1fcbcc1c23  2024-10-29 11:53:58 +00:00
447 changed files with 7548 additions and 5409 deletions

View File

@@ -31,7 +31,7 @@ jobs:
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c # v45.0.2
- uses: tj-actions/changed-files@e9772d140489982e0e3704fea5ee93d536f1e275 # v45.0.1
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file

View File

@@ -64,7 +64,7 @@ jobs:
git stash pop
- name: Create pull request
uses: peter-evans/create-pull-request@6cd32fd93684475c31847837f87bb135d40a2b79 # v7.0.3
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"

View File

@@ -295,7 +295,7 @@ jobs:
if: ${{ env.UPDATE_VERSION == 'true' }}
- name: Create PR to update VERSION on master branch
uses: peter-evans/create-pull-request@6cd32fd93684475c31847837f87bb135d40a2b79 # v7.0.3
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
with:
commit-message: Bump version in master
title: "chore: Bump version in master"

View File

@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.9.0@sha256:cbe2d5f94110cea9817dd8c5809d05df49b4bd1aac5203f3594d88665ad37988 AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.8.0@sha256:bd00c03095f7586432805dbf7989be10361d27987f93de904b1fc003949a4794 AS argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]

View File

@@ -1,7 +1,7 @@
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/app-controller} HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: hack/start-redis-with-password.sh
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'

View File

@@ -3,9 +3,9 @@ header:
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: 74a367d10e7110209610ba3ec225539ebe5f7522
commit-hash: fe606708859574b9b6102a505e260fac5d3fb14e
project-url: https://github.com/argoproj/argo-cd
project-release: v2.14.0
project-release: v2.13.0
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:

View File

@@ -41,6 +41,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Beez Innovation Labs](https://www.beezlabs.com/)
1. [Bedag Informatik AG](https://www.bedag.ch/)
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
1. [Believable Bots](https://believablebots.io)
1. [BigPanda](https://bigpanda.io)
1. [BioBox Analytics](https://biobox.io)
1. [BMW Group](https://www.bmwgroup.com/)

View File

@@ -1 +1 @@
2.14.0
2.13.0-rc5

View File

@@ -994,7 +994,7 @@ func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1al
}
func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.ApplicationSet) bool {
return appset.Spec.Strategy != nil && appset.Spec.Strategy.RollingSync != nil && appset.Spec.Strategy.Type == "RollingSync"
return appset.Spec.Strategy != nil && appset.Spec.Strategy.RollingSync != nil && appset.Spec.Strategy.Type == "RollingSync" && len(appset.Spec.Strategy.RollingSync.Steps) > 0
}
func isApplicationHealthy(app argov1alpha1.Application) bool {
@@ -1017,6 +1017,16 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
return healthStatusString, syncStatusString, operationPhaseString
}
func getAppStep(appName string, appStepMap map[string]int) int {
// if an application is not selected by any match expression, it defaults to step -1
step := -1
if appStep, ok := appStepMap[appName]; ok {
// 1-based indexing
step = appStep + 1
}
return step
}
// check the status of each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
now := metav1.Now()
@@ -1036,7 +1046,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
LastTransitionTime: &now,
Message: "No Application status found, defaulting status to Waiting.",
Status: "Waiting",
Step: fmt.Sprint(appStepMap[app.Name] + 1),
Step: fmt.Sprint(getAppStep(app.Name, appStepMap)),
TargetRevisions: app.Status.GetRevisions(),
}
} else {
@@ -1061,7 +1071,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Waiting"
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
}
@@ -1079,14 +1089,14 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
}
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = "Progressing"
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
}
}
@@ -1095,7 +1105,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
}
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
@@ -1103,7 +1113,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
currentAppStatus.LastTransitionTime = &now
currentAppStatus.Status = healthStatusString
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
}
appStatuses = append(appStatuses, currentAppStatus)
@@ -1124,14 +1134,12 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
// if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" {
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
updateCountMap := []int{}
totalCountMap := []int{}
length := 0
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
}
length := len(applicationSet.Spec.Strategy.RollingSync.Steps)
for s := 0; s < length; s++ {
updateCountMap = append(updateCountMap, 0)
totalCountMap = append(totalCountMap, 0)
@@ -1141,10 +1149,8 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
for _, appStatus := range applicationSet.Status.ApplicationStatus {
totalCountMap[appStepMap[appStatus.Application]] += 1
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
updateCountMap[appStepMap[appStatus.Application]] += 1
}
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
updateCountMap[appStepMap[appStatus.Application]] += 1
}
}
@@ -1169,7 +1175,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
maxUpdateAllowed = false
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap), applicationSet.Name)
}
}
@@ -1178,7 +1184,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
appStatus.LastTransitionTime = &now
appStatus.Status = "Pending"
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)
appStatus.Step = fmt.Sprint(getAppStep(appStatus.Application, appStepMap))
updateCountMap[appStepMap[appStatus.Application]] += 1
}
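
The getAppStep helper in this hunk centralizes the step lookup: applications found in the step map get a 1-based step number for display, and anything not selected by a match expression falls back to -1. A minimal standalone sketch of that behavior (hypothetical file, not code from the repository):

package main

import "fmt"

// getAppStep mirrors the helper shown in the hunk above: step numbers are
// 1-based for display, and applications missing from the map default to -1.
func getAppStep(appName string, appStepMap map[string]int) int {
	step := -1
	if appStep, ok := appStepMap[appName]; ok {
		step = appStep + 1
	}
	return step
}

func main() {
	appStepMap := map[string]int{"app1": 0, "app2": 1}
	fmt.Println(getAppStep("app1", appStepMap))   // 1
	fmt.Println(getAppStep("app2", appStepMap))   // 2
	fmt.Println(getAppStep("orphan", appStepMap)) // -1: not selected by any match expression
}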

View File

@@ -3813,8 +3813,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
},
@@ -3830,8 +3839,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
},
@@ -3853,8 +3871,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
},
@@ -3876,8 +3903,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -3945,8 +3981,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4014,8 +4059,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4083,8 +4137,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4152,8 +4215,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4221,8 +4293,17 @@ func TestBuildAppSyncMap(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4400,8 +4481,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
},
@@ -4417,8 +4507,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
},
@@ -4440,6 +4539,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4459,8 +4561,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{},
@@ -4483,6 +4594,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4502,8 +4616,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4535,6 +4658,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4554,8 +4680,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4601,6 +4736,10 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
"app2-multisource": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4627,8 +4766,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4655,6 +4803,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4674,8 +4825,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4708,6 +4868,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4727,8 +4890,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4761,6 +4933,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4780,8 +4955,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4814,6 +4998,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4833,8 +5020,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
},
@@ -4883,8 +5079,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -4933,6 +5138,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -4952,8 +5160,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -5002,6 +5219,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -5021,8 +5241,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -5070,6 +5299,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
@@ -5089,8 +5321,17 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
@@ -5130,6 +5371,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",

View File

@@ -168,7 +168,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
return pullrequest.NewBitbucketServiceBearerToken(ctx, providerConfig.API, appToken, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
return pullrequest.NewBitbucketServiceBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {

View File

@@ -82,7 +82,6 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
pr.Repository.Name == nil ||
pr.PullRequestId == nil ||
pr.SourceRefName == nil ||
pr.TargetRefName == nil ||
pr.LastMergeSourceCommit == nil ||
pr.LastMergeSourceCommit.CommitId == nil {
continue
@@ -95,13 +94,12 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
if *pr.Repository.Name == a.repo {
pullRequests = append(pullRequests, &PullRequest{
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
TargetBranch: strings.Replace(*pr.TargetRefName, "refs/heads/", "", 1),
HeadSHA: *pr.LastMergeSourceCommit.CommitId,
Labels: azureDevOpsLabels,
Author: strings.Split(*pr.CreatedBy.UniqueName, "@")[0], // Get the part before the @ in the email-address
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
HeadSHA: *pr.LastMergeSourceCommit.CommitId,
Labels: azureDevOpsLabels,
Author: strings.Split(*pr.CreatedBy.UniqueName, "@")[0], // Get the part before the @ in the email-address
})
}
}

View File

@@ -72,7 +72,6 @@ func TestListPullRequest(t *testing.T) {
PullRequestId: createIntPtr(pr_id),
Title: createStringPtr(pr_title),
SourceRefName: createStringPtr("refs/heads/feature-branch"),
TargetRefName: createStringPtr("refs/heads/main"),
LastMergeSourceCommit: &git.GitCommitRef{
CommitId: createStringPtr(pr_head_sha),
},
@@ -107,7 +106,6 @@ func TestListPullRequest(t *testing.T) {
require.NoError(t, err)
assert.Len(t, list, 1)
assert.Equal(t, "feature-branch", list[0].Branch)
assert.Equal(t, "main", list[0].TargetBranch)
assert.Equal(t, pr_head_sha, list[0].HeadSHA)
assert.Equal(t, "feat(123)", list[0].Title)
assert.Equal(t, pr_id, list[0].Number)

assets/swagger.json (generated)
View File

@@ -4719,6 +4719,9 @@
"impersonationEnabled": {
"type": "boolean"
},
"installationID": {
"type": "string"
},
"kustomizeOptions": {
"$ref": "#/definitions/v1alpha1KustomizeOptions"
},
@@ -6084,7 +6087,7 @@
"properties": {
"defaultServiceAccount": {
"type": "string",
"title": "ServiceAccountName to be used for impersonation during the sync operation"
"title": "DefaultServiceAccount to be used for impersonation during the sync operation"
},
"namespace": {
"description": "Namespace specifies the target namespace for the application's resources.",
@@ -9167,6 +9170,11 @@
"description": "SyncOperation contains details about a sync operation.",
"type": "object",
"properties": {
"autoHealAttemptsCount": {
"type": "integer",
"format": "int64",
"title": "SelfHealAttemptsCount contains the number of auto-heal attempts"
},
"dryRun": {
"type": "boolean",
"title": "DryRun specifies to perform a `kubectl apply --dry-run` without actually performing the sync"

View File

@@ -13,6 +13,7 @@ import (
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -56,6 +57,9 @@ func NewCommand() *cobra.Command {
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
selfHealBackoffTimeoutSeconds int
selfHealBackoffFactor int
selfHealBackoffCapSeconds int
statusProcessors int
operationProcessors int
glogLevel int
@@ -152,6 +156,14 @@ func NewCommand() *cobra.Command {
kubectl := kubeutil.NewKubectl()
clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
var selfHealBackoff *wait.Backoff
if selfHealBackoffTimeoutSeconds != 0 {
selfHealBackoff = &wait.Backoff{
Duration: time.Duration(selfHealBackoffTimeoutSeconds) * time.Second,
Factor: float64(selfHealBackoffFactor),
Cap: time.Duration(selfHealBackoffCapSeconds) * time.Second,
}
}
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
@@ -164,6 +176,7 @@ func NewCommand() *cobra.Command {
hardResyncDuration,
time.Duration(appResyncJitter)*time.Second,
time.Duration(selfHealTimeoutSeconds)*time.Second,
selfHealBackoff,
time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
metricsCacheExpiration,
@@ -226,7 +239,10 @@ func NewCommand() *cobra.Command {
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 0, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffTimeoutSeconds, "self-heal-backoff-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_TIMEOUT_SECONDS", 2, 0, math.MaxInt32), "Specifies initial timeout of exponential backoff between self heal attempts")
command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")

View File

@@ -585,8 +585,8 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
var status string
var allow, deny, inactiveAllows bool
if windows.HasWindows() {
active := windows.Active()
if active.HasWindows() {
active, err := windows.Active()
if err == nil && active.HasWindows() {
for _, w := range *active {
if w.Kind == "deny" {
deny = true
@@ -595,13 +595,14 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
}
}
}
if windows.InactiveAllows().HasWindows() {
inactiveAllowWindows, err := windows.InactiveAllows()
if err == nil && inactiveAllowWindows.HasWindows() {
inactiveAllows = true
}
s := windows.CanSync(true)
if deny || !deny && !allow && inactiveAllows {
if s {
s, err := windows.CanSync(true)
if err == nil && s {
status = "Manual Allowed"
} else {
status = "Sync Denied"
@@ -1374,7 +1375,7 @@ func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[
}
if local, ok := objs[key]; ok || live != nil {
if local != nil && !kube.IsCRD(local) {
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()))
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()), argoSettings.GetInstallationID())
errors.CheckError(err)
}
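
One side of this hunk uses error-returning signatures for the sync-window queries: Active, InactiveAllows and CanSync each yield a value and an error, so callers handle both. A minimal, hypothetical sketch of that call pattern (simplified status logic, assuming a module version that has these signatures; not code from the repository):

package main

import (
	"fmt"

	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// windowStatus is a hypothetical helper following the call pattern in the hunk
// above: every window query returns (value, error) and both are checked.
func windowStatus(windows *argoappv1.SyncWindows) string {
	if !windows.HasWindows() {
		return "No windows assigned"
	}
	active, err := windows.Active()
	if err != nil {
		return fmt.Sprintf("error evaluating active windows: %v", err)
	}
	canSync, err := windows.CanSync(true)
	if err != nil {
		return fmt.Sprintf("error evaluating sync windows: %v", err)
	}
	if active.HasWindows() && !canSync {
		return "Sync Denied"
	}
	return "Manual Allowed"
}

func main() {
	fmt.Println(windowStatus(&argoappv1.SyncWindows{}))
}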

View File

@@ -352,9 +352,10 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
fmt.Fprintf(w, fmtStr, headers...)
if proj.Spec.SyncWindows.HasWindows() {
for i, window := range proj.Spec.SyncWindows {
isActive, _ := window.Active()
vals := []interface{}{
strconv.Itoa(i),
formatBoolOutput(window.Active()),
formatBoolOutput(isActive),
window.Kind,
window.Schedule,
window.Duration,

View File

@@ -70,7 +70,7 @@ func NewCommand() *cobra.Command {
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertFile, "client-crt", config.GetFlag("client-crt", ""), "Client certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertKeyFile, "client-crt-key", config.GetFlag("client-crt-key", ""), "Client certificate key file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", env.StringFromEnv(common.EnvAuthToken, "")), fmt.Sprintf("Authentication token; set this or the %s environment variable", common.EnvAuthToken))
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.")
command.PersistentFlags().StringVar(&clientOpts.GRPCWebRootPath, "grpc-web-root-path", config.GetFlag("grpc-web-root-path", ""), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.")
command.PersistentFlags().StringVar(&cmdutil.LogFormat, "logformat", config.GetFlag("logformat", "text"), "Set the logging format. One of: text|json")

View File

@@ -48,6 +48,8 @@ func AddProjFlags(command *cobra.Command, opts *ProjectOpts) {
command.Flags().StringArrayVar(&opts.allowedNamespacedResources, "allow-namespaced-resource", []string{}, "List of allowed namespaced resources")
command.Flags().StringArrayVar(&opts.deniedNamespacedResources, "deny-namespaced-resource", []string{}, "List of denied namespaced resources")
command.Flags().StringSliceVar(&opts.SourceNamespaces, "source-namespaces", []string{}, "List of source namespaces for applications")
command.Flags().StringArrayVar(&opts.destinationServiceAccounts, "dest-service-accounts", []string{},
"Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)")
}
func getGroupKindList(values []string) []v1.GroupKind {
@@ -98,8 +100,8 @@ func (opts *ProjectOpts) GetDestinationServiceAccounts() []v1alpha1.ApplicationD
destinationServiceAccounts := make([]v1alpha1.ApplicationDestinationServiceAccount, 0)
for _, destStr := range opts.destinationServiceAccounts {
parts := strings.Split(destStr, ",")
if len(parts) != 2 {
log.Fatalf("Expected destination of the form: server,namespace. Received: %s", destStr)
if len(parts) != 3 {
log.Fatalf("Expected destination service account of the form: server,namespace, defaultServiceAccount. Received: %s", destStr)
} else {
destinationServiceAccounts = append(destinationServiceAccounts, v1alpha1.ApplicationDestinationServiceAccount{
Server: parts[0],

View File

@@ -5,6 +5,8 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
func TestProjectOpts_ResourceLists(t *testing.T) {
@@ -22,3 +24,27 @@ func TestProjectOpts_ResourceLists(t *testing.T) {
[]v1.GroupKind{{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}}, opts.GetDeniedClusterResources(),
)
}
func TestProjectOpts_GetDestinationServiceAccounts(t *testing.T) {
opts := ProjectOpts{
destinationServiceAccounts: []string{
"https://192.168.99.100:8443,test-ns,test-sa",
"https://kubernetes.default.svc.local:6443,guestbook,guestbook-sa",
},
}
assert.ElementsMatch(t,
[]v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://192.168.99.100:8443",
Namespace: "test-ns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.default.svc.local:6443",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
}, opts.GetDestinationServiceAccounts(),
)
}

View File

@@ -178,6 +178,7 @@ const (
// AnnotationKeyAppInstance is the Argo CD application name is used as the instance name
AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id"
AnnotationInstallationID = "argocd.argoproj.io/installation-id"
// AnnotationCompareOptions is a comma-separated list of options for comparison
AnnotationCompareOptions = "argocd.argoproj.io/compare-options"
@@ -254,8 +255,6 @@ const (
EnvHelmIndexCacheDuration = "ARGOCD_HELM_INDEX_CACHE_DURATION"
// EnvAppConfigPath allows to override the configuration path for repo server
EnvAppConfigPath = "ARGOCD_APP_CONF_PATH"
// EnvAuthToken is the environment variable name for the auth token used by the CLI
EnvAuthToken = "ARGOCD_AUTH_TOKEN"
// EnvLogFormat log format that is defined by `--logformat` option
EnvLogFormat = "ARGOCD_LOG_FORMAT"
// EnvLogLevel log level that is defined by `--loglevel` option

View File

@@ -130,6 +130,7 @@ type ApplicationController struct {
statusHardRefreshTimeout time.Duration
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackOff *wait.Backoff
repoClientset apiclient.Clientset
db db.ArgoDB
settingsMgr *settings_util.SettingsManager
@@ -160,6 +161,7 @@ func NewApplicationController(
appHardResyncPeriod time.Duration,
appResyncJitter time.Duration,
selfHealTimeout time.Duration,
selfHealBackoff *wait.Backoff,
repoErrorGracePeriod time.Duration,
metricsPort int,
metricsCacheExpiration time.Duration,
@@ -200,6 +202,7 @@ func NewApplicationController(
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackOff: selfHealBackoff,
clusterSharding: clusterSharding,
projByNameCache: sync.Map{},
applicationNamespaces: applicationNamespaces,
@@ -1690,7 +1693,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
app.Status.Summary = tree.GetSummary(app)
}
if project.Spec.SyncWindows.Matches(app).CanSync(false) {
canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
if canSync {
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
setOpMs = opMS
if syncErrCond != nil {
@@ -1980,6 +1984,9 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
InitiatedBy: appv1.OperationInitiator{Automated: true},
Retry: appv1.RetryStrategy{Limit: 5},
}
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}
if app.Spec.SyncPolicy.Retry != nil {
op.Retry = *app.Spec.SyncPolicy.Retry
}
@@ -1997,6 +2004,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
return nil, 0
} else if alreadyAttempted && selfHeal {
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
@@ -2115,10 +2123,24 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
}
var retryAfter time.Duration
if app.Status.OperationState.FinishedAt == nil {
retryAfter = ctrl.selfHealTimeout
if ctrl.selfHealBackOff == nil {
if app.Status.OperationState.FinishedAt == nil {
retryAfter = ctrl.selfHealTimeout
} else {
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
}
} else {
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
backOff := *ctrl.selfHealBackOff
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
var delay time.Duration
for backOff.Steps > 0 {
delay = backOff.Step()
}
if app.Status.OperationState.FinishedAt == nil {
retryAfter = delay
} else {
retryAfter = delay - time.Since(app.Status.OperationState.FinishedAt.Time)
}
}
return retryAfter <= 0, retryAfter
}
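
The selfHealBackOff branch in this hunk derives the retry delay by copying the configured wait.Backoff, setting Steps to the recorded self-heal attempt count, and calling Step() until the step budget is exhausted, so the last returned value is the wait before the next attempt. A standalone sketch of that calculation, using the same Factor 3 / Duration 2s / Cap 5m values as TestSelfHealExponentialBackoff further down (hypothetical helper name, not repository code):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// selfHealDelay reproduces the delay calculation from the hunk above: copy the
// base backoff, give it `attempts` steps, and keep calling Step() until the
// budget runs out; the final value is the wait for the next self-heal attempt.
func selfHealDelay(base wait.Backoff, attempts int) time.Duration {
	backOff := base
	backOff.Steps = attempts
	var delay time.Duration
	for backOff.Steps > 0 {
		delay = backOff.Step()
	}
	return delay
}

func main() {
	base := wait.Backoff{Duration: 2 * time.Second, Factor: 3, Cap: 5 * time.Minute}
	for attempts := 0; attempts <= 3; attempts++ {
		// prints 0s, 2s, 6s, 18s, matching the expectations in
		// TestSelfHealExponentialBackoff below.
		fmt.Printf("attempt %d: wait %v\n", attempts, selfHealDelay(base, attempts))
	}
}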

View File

@@ -4,16 +4,18 @@ import (
"context"
"encoding/json"
"errors"
"fmt"
"testing"
"time"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"k8s.io/utils/ptr"
"github.com/argoproj/argo-cd/v2/common"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
@@ -64,6 +66,7 @@ type fakeData struct {
metricsCacheExpiration time.Duration
applicationNamespaces []string
updateRevisionForPathsResponse *apiclient.UpdateRevisionForPathsResponse
additionalObjs []runtime.Object
}
type MockKubectl struct {
@@ -133,7 +136,9 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
},
Data: data.configMapData,
}
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
runtimeObjs := []runtime.Object{&clust, &secret, &cm}
runtimeObjs = append(runtimeObjs, data.additionalObjs...)
kubeClient := fake.NewSimpleClientset(runtimeObjs...)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}}
ctrl, err := NewApplicationController(
@@ -151,6 +156,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
time.Hour,
time.Second,
time.Minute,
nil,
time.Second*10,
common.DefaultPortArgoCDMetrics,
data.metricsCacheExpiration,
@@ -2184,3 +2190,66 @@ func TestAlreadyAttemptSync(t *testing.T) {
assert.False(t, attempted)
})
}
func assertDurationAround(t *testing.T, expected time.Duration, actual time.Duration) {
delta := time.Second / 2
assert.GreaterOrEqual(t, expected, actual-delta)
assert.LessOrEqual(t, expected, actual+delta)
}
func TestSelfHealExponentialBackoff(t *testing.T) {
ctrl := newFakeController(&fakeData{}, nil)
ctrl.selfHealBackOff = &wait.Backoff{
Factor: 3,
Duration: 2 * time.Second,
Cap: 5 * time.Minute,
}
app := &v1alpha1.Application{
Status: v1alpha1.ApplicationStatus{
OperationState: &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
},
},
},
}
testCases := []struct {
attempts int64
finishedAt *metav1.Time
expectedDuration time.Duration
shouldSelfHeal bool
}{{
attempts: 0,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 0,
shouldSelfHeal: true,
}, {
attempts: 1,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 2 * time.Second,
shouldSelfHeal: false,
}, {
attempts: 2,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 6 * time.Second,
shouldSelfHeal: false,
}, {
attempts: 3,
finishedAt: nil,
expectedDuration: 18 * time.Second,
shouldSelfHeal: false,
}}
for i := range testCases {
tc := testCases[i]
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
app.Status.OperationState.FinishedAt = tc.finishedAt
ok, duration := ctrl.shouldSelfHeal(app)
require.Equal(t, ok, tc.shouldSelfHeal)
assertDurationAround(t, tc.expectedDuration, duration)
})
}
}

View File

@@ -197,6 +197,7 @@ type cacheSettings struct {
clusterSettings clustercache.Settings
appInstanceLabelKey string
trackingMethod appv1.TrackingMethod
installationID string
// resourceOverrides provides a list of ignored differences to ignore watched resource updates
resourceOverrides map[string]appv1.ResourceOverride
@@ -225,6 +226,10 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
if err != nil {
return nil, err
}
installationID, err := c.settingsMgr.GetInstallationID()
if err != nil {
return nil, err
}
resourceUpdatesOverrides, err := c.settingsMgr.GetIgnoreResourceUpdatesOverrides()
if err != nil {
return nil, err
@@ -246,7 +251,7 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
ResourcesFilter: resourcesFilter,
}
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), installationID, resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
}
func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
@@ -523,7 +528,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
res.Health, _ = health.GetResourceHealth(un, cacheSettings.clusterSettings.ResourceHealthOverride)
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod)
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod, cacheSettings.installationID)
if isRoot && appName != "" {
res.AppName = appName
}

View File

@@ -278,6 +278,32 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
}
func isPodInitializedConditionTrue(status *v1.PodStatus) bool {
for _, condition := range status.Conditions {
if condition.Type != v1.PodInitialized {
continue
}
return condition.Status == v1.ConditionTrue
}
return false
}
func isRestartableInitContainer(initContainer *v1.Container) bool {
if initContainer == nil {
return false
}
if initContainer.RestartPolicy == nil {
return false
}
return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}
func isPodPhaseTerminal(phase v1.PodPhase) bool {
return phase == v1.PodFailed || phase == v1.PodSucceeded
}
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
pod := v1.Pod{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
@@ -288,7 +314,8 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
totalContainers := len(pod.Spec.Containers)
readyContainers := 0
reason := string(pod.Status.Phase)
podPhase := pod.Status.Phase
reason := string(podPhase)
if pod.Status.Reason != "" {
reason = pod.Status.Reason
}
@@ -306,6 +333,21 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
res.Images = append(res.Images, image)
}
// If the Pod carries {type:PodScheduled, reason:SchedulingGated}, set reason to 'SchedulingGated'.
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
reason = v1.PodReasonSchedulingGated
}
}
initContainers := make(map[string]*v1.Container)
for i := range pod.Spec.InitContainers {
initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
if isRestartableInitContainer(&pod.Spec.InitContainers[i]) {
totalContainers++
}
}
initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
@@ -313,6 +355,12 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
continue
case isRestartableInitContainer(initContainers[container.Name]) &&
container.Started != nil && *container.Started:
if container.Ready {
readyContainers++
}
continue
case container.State.Terminated != nil:
// initialization is failed
if len(container.State.Terminated.Reason) == 0 {
@@ -334,8 +382,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
}
break
}
if !initializing {
restarts = 0
if !initializing || isPodInitializedConditionTrue(&pod.Status) {
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
@@ -370,7 +417,9 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
// and https://github.com/kubernetes/kubernetes/issues/90358#issuecomment-617859364
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
reason = "Unknown"
} else if pod.DeletionTimestamp != nil {
// If the pod is being deleted and the pod phase is not succeeded or failed, set the reason to "Terminating".
// See https://github.com/kubernetes/kubectl/issues/1595#issuecomment-2080001023
} else if pod.DeletionTimestamp != nil && !isPodPhaseTerminal(podPhase) {
reason = "Terminating"
}

View File

@@ -285,6 +285,552 @@ func TestGetPodInfo(t *testing.T) {
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
}
func TestGetPodWithInitialContainerInfo(t *testing.T) {
pod := strToUnstructured(`
apiVersion: "v1"
kind: "Pod"
metadata:
labels:
app: "app-with-initial-container"
name: "app-with-initial-container-5f46976fdb-vd6rv"
namespace: "default"
ownerReferences:
- apiVersion: "apps/v1"
kind: "ReplicaSet"
name: "app-with-initial-container-5f46976fdb"
spec:
containers:
- image: "alpine:latest"
imagePullPolicy: "Always"
name: "app-with-initial-container"
initContainers:
- image: "alpine:latest"
imagePullPolicy: "Always"
name: "app-with-initial-container-logshipper"
nodeName: "minikube"
status:
containerStatuses:
- image: "alpine:latest"
name: "app-with-initial-container"
ready: true
restartCount: 0
started: true
state:
running:
startedAt: "2024-10-08T08:44:25Z"
initContainerStatuses:
- image: "alpine:latest"
name: "app-with-initial-container-logshipper"
ready: true
restartCount: 0
started: false
state:
terminated:
exitCode: 0
reason: "Completed"
phase: "Running"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/1"},
}, info.Info)
}
func TestGetPodInfoWithSidecar(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
labels:
app: app-with-sidecar
name: app-with-sidecar-6664cc788c-lqlrp
namespace: default
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: app-with-sidecar-6664cc788c
spec:
containers:
- image: 'docker.m.daocloud.io/library/alpine:latest'
imagePullPolicy: Always
name: app-with-sidecar
initContainers:
- image: 'docker.m.daocloud.io/library/alpine:latest'
imagePullPolicy: Always
name: logshipper
restartPolicy: Always
nodeName: minikube
status:
containerStatuses:
- image: 'docker.m.daocloud.io/library/alpine:latest'
name: app-with-sidecar
ready: true
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-08T08:39:43Z'
initContainerStatuses:
- image: 'docker.m.daocloud.io/library/alpine:latest'
name: logshipper
ready: true
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-08T08:39:40Z'
phase: Running
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "2/2"},
}, info.Info)
}
func TestGetPodInfoWithInitialContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
generateName: myapp-long-exist-56b7d8794d-
labels:
app: myapp-long-exist
name: myapp-long-exist-56b7d8794d-pbgrd
namespace: linghao
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: myapp-long-exist-56b7d8794d
spec:
containers:
- image: alpine:latest
imagePullPolicy: Always
name: myapp-long-exist
initContainers:
- image: alpine:latest
imagePullPolicy: Always
name: myapp-long-exist-logshipper
nodeName: minikube
status:
containerStatuses:
- image: alpine:latest
name: myapp-long-exist
ready: false
restartCount: 0
started: false
state:
waiting:
reason: PodInitializing
initContainerStatuses:
- image: alpine:latest
name: myapp-long-exist-logshipper
ready: false
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-09T08:03:45Z'
phase: Pending
startTime: '2024-10-09T08:02:39Z'
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod has 2 restartable init containers, the first one running but not started.
func TestGetPodInfoWithRestartableInitContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test1
spec:
initContainers:
- name: restartable-init-1
restartPolicy: Always
- name: restartable-init-2
restartPolicy: Always
containers:
- name: container
nodeName: minikube
status:
phase: Pending
initContainerStatuses:
- name: restartable-init-1
ready: false
restartCount: 3
state:
running: {}
started: false
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
- name: restartable-init-2
ready: false
state:
waiting: {}
started: false
containerStatuses:
- ready: false
restartCount: 0
state:
waiting: {}
conditions:
- type: ContainersReady
status: "False"
- type: Initialized
status: "False"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod has 2 restartable init containers, the first one started and the second one running but not started.
func TestGetPodInfoWithPartiallyStartedInitContainers(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test1
spec:
initContainers:
- name: restartable-init-1
restartPolicy: Always
- name: restartable-init-2
restartPolicy: Always
containers:
- name: container
nodeName: minikube
status:
phase: Pending
initContainerStatuses:
- name: restartable-init-1
ready: false
restartCount: 3
state:
running: {}
started: true
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
- name: restartable-init-2
ready: false
state:
running: {}
started: false
containerStatuses:
- ready: false
restartCount: 0
state:
waiting: {}
conditions:
- type: ContainersReady
status: "False"
- type: Initialized
status: "False"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:1/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod has 2 restartable init containers started and 1 container running
func TestGetPodInfoWithStartedInitContainers(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test2
spec:
initContainers:
- name: restartable-init-1
restartPolicy: Always
- name: restartable-init-2
restartPolicy: Always
containers:
- name: container
nodeName: minikube
status:
phase: Running
initContainerStatuses:
- name: restartable-init-1
ready: false
restartCount: 3
state:
running: {}
started: true
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
- name: restartable-init-2
ready: false
state:
running: {}
started: true
containerStatuses:
- ready: true
restartCount: 4
state:
running: {}
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
conditions:
- type: ContainersReady
status: "False"
- type: Initialized
status: "True"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/3"},
{Name: "Restart Count", Value: "7"},
}, info.Info)
}
// Test pod has 1 init container restarting and 1 container not running
func TestGetPodInfoWithNormalInitContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test7
spec:
initContainers:
- name: init-container
containers:
- name: main-container
nodeName: minikube
status:
phase: podPhase
initContainerStatuses:
- ready: false
restartCount: 3
state:
running: {}
lastTerminationState:
terminated:
finishedAt: "2023-10-01T00:00:00Z" # Replace with the actual time
containerStatuses:
- ready: false
restartCount: 0
state:
waiting: {}
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod condition succeed
func TestPodConditionSucceeded(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test8
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Succeeded
containerStatuses:
- ready: false
restartCount: 0
state:
terminated:
reason: Completed
exitCode: 0
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition failed
func TestPodConditionFailed(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test9
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Failed
containerStatuses:
- ready: false
restartCount: 0
state:
terminated:
reason: Error
exitCode: 1
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Error"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition succeed with deletion
func TestPodConditionSucceededWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test10
deletionTimestamp: "2023-10-01T00:00:00Z"
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Succeeded
containerStatuses:
- ready: false
restartCount: 0
state:
terminated:
reason: Completed
exitCode: 0
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition running with deletion
func TestPodConditionRunningWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test11
deletionTimestamp: "2023-10-01T00:00:00Z"
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Running
containerStatuses:
- ready: false
restartCount: 0
state:
running: {}
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition pending with deletion
func TestPodConditionPendingWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test12
deletionTimestamp: "2023-10-01T00:00:00Z"
spec:
nodeName: minikube
containers:
- name: container
status:
phase: Pending
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test PodScheduled condition with reason SchedulingGated
func TestPodScheduledWithSchedulingGated(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
name: test13
spec:
nodeName: minikube
containers:
- name: container1
- name: container2
status:
phase: podPhase
conditions:
- type: PodScheduled
status: "False"
reason: SchedulingGated
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "SchedulingGated"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/2"},
}, info.Info)
}
func TestGetNodeInfo(t *testing.T) {
node := strToUnstructured(`
apiVersion: v1

View File

@@ -161,6 +161,11 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
return nil, nil, false, fmt.Errorf("failed to get Helm settings: %w", err)
}
installationID, err := m.settingsMgr.GetInstallationID()
if err != nil {
return nil, nil, false, fmt.Errorf("failed to get installation ID: %w", err)
}
ts.AddCheckpoint("build_options_ms")
serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
if err != nil {
@@ -230,6 +235,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
RefSources: refSources,
HasMultipleSources: app.Spec.HasMultipleSources(),
InstallationID: installationID,
})
if err != nil {
return nil, nil, false, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
@@ -270,6 +276,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
RefSources: refSources,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
InstallationID: installationID,
})
if err != nil {
return nil, nil, false, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
@@ -353,20 +360,24 @@ func DeduplicateTargetObjects(
// getComparisonSettings will return the system level settings related to the
// diff/normalization process.
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) {
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, string, error) {
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
if err != nil {
return "", nil, nil, err
return "", nil, nil, "", err
}
appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
if err != nil {
return "", nil, nil, err
return "", nil, nil, "", err
}
resFilter, err := m.settingsMgr.GetResourcesFilter()
if err != nil {
return "", nil, nil, err
return "", nil, nil, "", err
}
return appLabelKey, resourceOverrides, resFilter, nil
installationID, err := m.settingsMgr.GetInstallationID()
if err != nil {
return "", nil, nil, "", err
}
return appLabelKey, resourceOverrides, resFilter, installationID, nil
}
// verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
@@ -417,7 +428,7 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
// revision and overrides in the app spec.
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error) {
ts := stats.NewTimingStats()
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
appLabelKey, resourceOverrides, resFilter, installationID, err := m.getComparisonSettings()
ts.AddCheckpoint("settings_ms")
@@ -585,7 +596,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
for _, liveObj := range liveObjByKey {
if liveObj != nil {
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod)
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod, installationID)
if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {
fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")
conditions = append(conditions, v1alpha1.ApplicationCondition{
@@ -724,7 +735,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
}
gvk := obj.GroupVersionKind()
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod, installationID)
resState := v1alpha1.ResourceStatus{
Namespace: obj.GetNamespace(),
@@ -924,9 +935,7 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
return false
}
currentSpec := app.BuildComparedToStatus()
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
if specChanged {
if !specEqualsCompareTo(app.Spec, app.Status.Sync.ComparedTo) {
log.WithField("useDiffCache", "false").Debug("specChanged")
return false
}
@@ -935,6 +944,25 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
return true
}
// specEqualsCompareTo compares the application spec to the comparedTo status. It normalizes the destination to match
// the comparedTo destination before comparing. It does not mutate the original spec or comparedTo.
func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, comparedTo v1alpha1.ComparedTo) bool {
// Make a copy to be sure we don't mutate the original.
specCopy := spec.DeepCopy()
currentSpec := specCopy.BuildComparedToStatus()
// The spec might have been augmented to include both server and name, so change it to match the comparedTo before
// comparing.
if comparedTo.Destination.Server == "" {
currentSpec.Destination.Server = ""
}
if comparedTo.Destination.Name == "" {
currentSpec.Destination.Name = ""
}
return reflect.DeepEqual(comparedTo, currentSpec)
}
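To make the cache-hit scenario exercised by the new test case further down concrete, here is a minimal, self-contained sketch of the blank-out normalization; the local structs are simplified stand-ins for the real `ApplicationDestination`/`ComparedTo` types, not the actual API types.

```go
package main

import (
	"fmt"
	"reflect"
)

// Simplified stand-ins, used only to illustrate the normalization performed
// by specEqualsCompareTo above.
type destination struct {
	Server    string
	Name      string
	Namespace string
}

type comparedTo struct {
	Destination destination
}

func destinationsMatch(specDest destination, cmp comparedTo) bool {
	current := specDest // copy, so the caller's spec is never mutated
	// The controller may have augmented the spec with both server and name;
	// blank out whichever field comparedTo never recorded before comparing.
	if cmp.Destination.Server == "" {
		current.Server = ""
	}
	if cmp.Destination.Name == "" {
		current.Name = ""
	}
	return reflect.DeepEqual(cmp.Destination, current)
}

func main() {
	spec := destination{Server: "https://kubernetes.default.svc", Name: "in-cluster", Namespace: "httpbin"}
	cmp := comparedTo{Destination: destination{Server: "https://kubernetes.default.svc", Namespace: "httpbin"}}
	// true: the augmented Name no longer busts the diff cache
	fmt.Println(destinationsMatch(spec, cmp))
}
```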
func (m *appStateManager) persistRevisionHistory(
app *v1alpha1.Application,
revision string,
@@ -1029,7 +1057,7 @@ func NewAppStateManager(
// group and kind) match the properties of the live object, or if the tracking method
// used does not provide the required properties for matching.
// Reference: https://github.com/argoproj/argo-cd/issues/8683
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, installationID string) bool {
if live == nil {
return true
}
@@ -1062,7 +1090,7 @@ func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstruc
// to match the properties from the live object. Cluster scoped objects
// carry the app's destination namespace in the tracking annotation,
// but are unique in GVK + name combination.
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod, installationID)
if appInstance != nil {
return isSelfReferencedObj(live, *appInstance)
}

View File

@@ -1372,8 +1372,8 @@ func TestIsLiveResourceManaged(t *testing.T) {
configObj := managedObj.DeepCopy()
// then
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
})
t.Run("will return true if tracked with label", func(t *testing.T) {
// given
@@ -1381,43 +1381,43 @@ func TestIsLiveResourceManaged(t *testing.T) {
configObj := managedObjWithLabel.DeepCopy()
// then
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
})
t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
})
t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
})
t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
})
t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) {
// given
t.Parallel()
// then
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel, ""))
})
t.Run("will return true if live is nil", func(t *testing.T) {
t.Parallel()
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
})
t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) {
@@ -1427,11 +1427,13 @@ func TestIsLiveResourceManaged(t *testing.T) {
delete(config.GetAnnotations(), common.AnnotationKeyAppInstance)
// then
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
})
}
func TestUseDiffCache(t *testing.T) {
t.Parallel()
type fixture struct {
testName string
noCache bool
@@ -1692,6 +1694,44 @@ func TestUseDiffCache(t *testing.T) {
expectedUseCache: false,
serverSideDiff: false,
},
{
// There are code paths that modify the ApplicationSpec and augment the destination field with both the
// destination server and name. Since both fields are populated in the app spec but not in the comparedTo,
// we need to make sure we correctly compare the fields and don't miss the cache.
testName: "will return true if the app spec destination contains both server and name, but otherwise matches comparedTo",
noCache: false,
manifestInfos: manifestInfos("rev1"),
sources: sources(),
app: app("httpbin", "rev1", false, &argoappv1.Application{
Spec: argoappv1.ApplicationSpec{
Destination: argoappv1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Name: "httpbin",
Namespace: "httpbin",
},
},
Status: argoappv1.ApplicationStatus{
Resources: []argoappv1.ResourceStatus{},
Sync: argoappv1.SyncStatus{
Status: argoappv1.SyncStatusCodeSynced,
ComparedTo: argoappv1.ComparedTo{
Destination: argoappv1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "httpbin",
},
},
Revision: "rev1",
},
ReconciledAt: &metav1.Time{
Time: time.Now().Add(-time.Hour),
},
},
}),
manifestRevisions: []string{"rev1"},
statusRefreshTimeout: time.Hour * 24,
expectedUseCache: true,
serverSideDiff: true,
},
}
for _, tc := range cases {

View File

@@ -44,6 +44,10 @@ const (
// EnvVarSyncWaveDelay is an environment variable which controls the delay in seconds between
// each sync-wave
EnvVarSyncWaveDelay = "ARGOCD_SYNC_WAVE_DELAY"
// serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
// in a DefaultServiceAccount configured for a DestinationServiceAccount
serviceAccountDisallowedCharSet = "!*[]{}\\/"
)
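As a rough illustration of how the new constant is meant to be used (the helper name here is hypothetical; the real check lives inline in `deriveServiceAccountToImpersonate` further down), a default service account is rejected when it is blank or contains any disallowed character:

```go
package main

import (
	"fmt"
	"strings"
)

// Copied from the constant above: characters that must not appear in a
// DefaultServiceAccount configured for a DestinationServiceAccount.
const serviceAccountDisallowedCharSet = "!*[]{}\\/"

// isValidDefaultServiceAccount is a hypothetical helper mirroring the inline
// check: reject blank values and values containing disallowed characters.
func isValidDefaultServiceAccount(name string) bool {
	return strings.TrimSpace(name) != "" &&
		!strings.ContainsAny(name, serviceAccountDisallowedCharSet)
}

func main() {
	fmt.Println(isValidDefaultServiceAccount("guestbook-sa"))      // true
	fmt.Println(isValidDefaultServiceAccount("myns:guestbook-sa")) // true: namespace-qualified form is allowed
	fmt.Println(isValidDefaultServiceAccount("bad/sa"))            // false: '/' is disallowed
	fmt.Println(isValidDefaultServiceAccount(" "))                 // false: blank after trimming
}
```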
func (m *appStateManager) getOpenAPISchema(server string) (openapi.Resources, error) {
@@ -170,12 +174,18 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
state.Phase = common.OperationError
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
return
} else if syncWindowPreventsSync(app, proj) {
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
if state.Phase == common.OperationRunning {
state.Message = "Sync operation blocked by sync window"
} else {
isBlocked, err := syncWindowPreventsSync(app, proj)
if isBlocked {
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
if state.Phase == common.OperationRunning {
state.Message = "Sync operation blocked by sync window"
if err != nil {
state.Message = fmt.Sprintf("%s: %v", state.Message, err)
}
}
return
}
return
}
if !isMultiSourceRevision {
@@ -285,10 +295,20 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
log.Errorf("Could not get appInstanceLabelKey: %v", err)
return
}
installationID, err := m.settingsMgr.GetInstallationID()
if err != nil {
log.Errorf("Could not get installation ID: %v", err)
return
}
trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
if m.settingsMgr.IsImpersonationEnabled() {
serviceAccountToImpersonate, err := deriveServiceAccountName(proj, app)
impersonationEnabled, err := m.settingsMgr.IsImpersonationEnabled()
if err != nil {
log.Errorf("could not get impersonation feature flag: %v", err)
return
}
if impersonationEnabled {
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app)
if err != nil {
state.Phase = common.OperationError
state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
@@ -331,7 +351,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
return (len(syncOp.Resources) == 0 ||
isPostDeleteHook(target) ||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod, installationID)
}),
sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)),
sync.WithSyncWaveHook(delayBetweenSyncWaves),
@@ -548,18 +568,23 @@ func delayBetweenSyncWaves(phase common.SyncPhase, wave int, finalWave bool) err
return nil
}
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) bool {
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) (bool, error) {
window := proj.Spec.SyncWindows.Matches(app)
isManual := false
if app.Status.OperationState != nil {
isManual = !app.Status.OperationState.Operation.InitiatedBy.Automated
}
return !window.CanSync(isManual)
canSync, err := window.CanSync(isManual)
if err != nil {
// prevents sync because sync window has an error
return true, err
}
return !canSync, nil
}
// deriveServiceAccountName determines the service account to be used for impersonation for the sync operation.
// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
func deriveServiceAccountName(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
// spec.Destination.Namespace is optional. If not specified, use the Application's
// namespace
serviceAccountNamespace := application.Spec.Destination.Namespace
@@ -569,10 +594,18 @@ func deriveServiceAccountName(project *v1alpha1.AppProject, application *v1alpha
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
// if so, return the service account specified for that destination.
for _, item := range project.Spec.DestinationServiceAccounts {
dstServerMatched := glob.Match(item.Server, application.Spec.Destination.Server)
dstNamespaceMatched := glob.Match(item.Namespace, application.Spec.Destination.Namespace)
dstServerMatched, err := glob.MatchWithError(item.Server, application.Spec.Destination.Server)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
}
dstNamespaceMatched, err := glob.MatchWithError(item.Namespace, application.Spec.Destination.Namespace)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
}
if dstServerMatched && dstNamespaceMatched {
if strings.Contains(item.DefaultServiceAccount, ":") {
if strings.Trim(item.DefaultServiceAccount, " ") == "" || strings.ContainsAny(item.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
return "", fmt.Errorf("default service account contains invalid chars '%s'", item.DefaultServiceAccount)
} else if strings.Contains(item.DefaultServiceAccount, ":") {
// service account is specified along with its namespace.
return fmt.Sprintf("system:serviceaccount:%s", item.DefaultServiceAccount), nil
} else {

View File

@@ -2,6 +2,7 @@ package controller
import (
"context"
"strconv"
"testing"
"github.com/argoproj/gitops-engine/pkg/sync"
@@ -9,6 +10,7 @@ import (
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -644,6 +646,771 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
})
}
func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
}
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
project := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd-ns",
Name: "testProj",
},
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: destinationServiceAccounts,
},
}
app := &v1alpha1.Application{
ObjectMeta: v1.ObjectMeta{
Namespace: applicationNamespace,
Name: "testApp",
},
Spec: v1alpha1.ApplicationSpec{
Project: "testProj",
Destination: v1alpha1.ApplicationDestination{
Server: destinationServerURL,
Namespace: destinationNamespace,
},
},
}
return &fixture{
project: project,
application: app,
}
}
t.Run("empty destination service accounts", func(t *testing.T) {
// given an application referring to a project with no destination service accounts
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
assert.Equal(t, expectedSA, sa)
// then, there should be an error saying no valid match was found
assert.EqualError(t, err, expectedErrMsg)
})
t.Run("exact match of destination namespace", func(t *testing.T) {
// given an application referring to a project with exactly one destination service account that matches the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts having one exact match for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook-test",
DefaultServiceAccount: "guestbook-test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts having multiple matches for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-3",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be no error and it should use the first matching service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts with glob patterns matching the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "test*",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("no match among a valid list", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts with no matches for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "test1",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "test2",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should be an error saying no match was found
require.EqualError(t, err, expectedErrMsg)
assert.Equal(t, expectedSA, sa)
})
t.Run("app destination namespace is empty", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and an empty application destination namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa-2",
},
}
destinationNamespace := ""
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:argocd-ns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the service account configured with the empty namespace should be used.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via catch all glob pattern", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts having a catch-all glob pattern
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns1",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the catch all service account should be returned
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via invalid glob pattern", func(t *testing.T) {
// given an application referring to a project with a destination service account having an invalid glob pattern for namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "e[[a*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
assert.Equal(t, expectedSA, sa)
})
t.Run("sa specified with a namespace", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts having a matching service account specified with its namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "myns:test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:myns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
require.NoError(t, err)
})
}
func TestDeriveServiceAccountMatchingServers(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
}
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
project := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: "argocd-ns",
Name: "testProj",
},
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: destinationServiceAccounts,
},
}
app := &v1alpha1.Application{
ObjectMeta: v1.ObjectMeta{
Namespace: applicationNamespace,
Name: "testApp",
},
Spec: v1alpha1.ApplicationSpec{
Project: "testProj",
Destination: v1alpha1.ApplicationDestination{
Server: destinationServerURL,
Namespace: destinationNamespace,
},
},
}
return &fixture{
project: project,
application: app,
}
}
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and one exact match for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
{
Server: "https://abc.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-test-sa",
},
{
Server: "https://cde.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the right service account must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts and multiple matches for the application destination
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "guestbook",
DefaultServiceAccount: "guestbook-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the first matching service account should be used
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts with a matching glob pattern and an exact match
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "test*",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account of the glob pattern, being the first match, should be returned.
require.NoError(t, err)
})
t.Run("no match among a valid list", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts with no match
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa",
},
{
Server: "https://abc.svc.local",
Namespace: "testns",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://cde.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://xyz.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
expectedErr := "no matching service account found for destination server https://xyz.svc.local and namespace testns"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, an error with an appropriate message must be returned
require.EqualError(t, err, expectedErr)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via catch all glob pattern", func(t *testing.T) {
// given an application referring to a project with multiple destination service accounts with a matching catch-all glob pattern
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://kubernetes.svc.local",
Namespace: "testns1",
DefaultServiceAccount: "test-sa-2",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "*",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://localhost:6443"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:testns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the service account of the glob pattern match must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
t.Run("match done via invalid glob pattern", func(t *testing.T) {
// given an application referring to a project with a destination service account having an invalid glob pattern for server
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "e[[a*",
Namespace: "test-ns",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://kubernetes.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := ""
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination server")
assert.Equal(t, expectedSA, sa)
})
t.Run("sa specified with a namespace", func(t *testing.T) {
// given app sync impersonation feature is enabled and matching service account is prefixed with a namespace
t.Parallel()
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://abc.svc.local",
Namespace: "testns",
DefaultServiceAccount: "myns:test-sa",
},
{
Server: "https://kubernetes.svc.local",
Namespace: "default",
DefaultServiceAccount: "default-sa",
},
{
Server: "*",
Namespace: "*",
DefaultServiceAccount: "test-sa",
},
}
destinationNamespace := "testns"
destinationServerURL := "https://abc.svc.local"
applicationNamespace := "argocd-ns"
expectedSA := "system:serviceaccount:myns:test-sa"
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
// then, there should not be any error and the service account with the given namespace prefix must be returned.
require.NoError(t, err)
assert.Equal(t, expectedSA, sa)
})
}
func TestSyncWithImpersonate(t *testing.T) {
type fixture struct {
project *v1alpha1.AppProject
application *v1alpha1.Application
controller *ApplicationController
}
setup := func(impersonationEnabled bool, destinationNamespace, serviceAccountName string) *fixture {
app := newFakeApp()
app.Status.OperationState = nil
app.Status.History = nil
project := &v1alpha1.AppProject{
ObjectMeta: v1.ObjectMeta{
Namespace: test.FakeArgoCDNamespace,
Name: "default",
},
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://localhost:6443",
Namespace: destinationNamespace,
DefaultServiceAccount: serviceAccountName,
},
},
},
}
additionalObjs := []runtime.Object{}
if serviceAccountName != "" {
syncServiceAccount := &corev1.ServiceAccount{
ObjectMeta: v1.ObjectMeta{
Name: serviceAccountName,
Namespace: test.FakeDestNamespace,
},
}
additionalObjs = append(additionalObjs, syncServiceAccount)
}
data := fakeData{
apps: []runtime.Object{app, project},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: "https://localhost:6443",
Revision: "abc123",
},
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{},
configMapData: map[string]string{
"application.sync.impersonation.enabled": strconv.FormatBool(impersonationEnabled),
},
additionalObjs: additionalObjs,
}
ctrl := newFakeController(&data, nil)
return &fixture{
project: project,
application: app,
controller: ctrl,
}
}
t.Run("sync with impersonation and no matching service account", func(t *testing.T) {
// given app sync impersonation feature is enabled with an application referring to a project with no matching service account
f := setup(true, test.FakeArgoCDNamespace, "")
opMessage := "failed to find a matching service account to impersonate: no matching service account found for destination server https://localhost:6443 and namespace fake-dest-ns"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then, app sync should fail with expected error message in operation state
assert.Equal(t, common.OperationError, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("sync with impersonation and empty service account match", func(t *testing.T) {
// given app sync impersonation feature is enabled with an application referring to a project whose matching service account is an empty string
f := setup(true, test.FakeDestNamespace, "")
opMessage := "failed to find a matching service account to impersonate: default service account contains invalid chars ''"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should fail with expected error message in operation state
assert.Equal(t, common.OperationError, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("sync with impersonation and matching sa", func(t *testing.T) {
// given app sync impersonation feature is enabled with an application referring to a project with a matching service account
f := setup(true, test.FakeDestNamespace, "test-sa")
opMessage := "successfully synced (no more tasks)"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then app sync should not fail
assert.Equal(t, common.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
t.Run("sync without impersonation", func(t *testing.T) {
// given app sync impersonation feature is disabled with an application referring to a project with a matching service account
f := setup(false, test.FakeDestNamespace, "")
opMessage := "successfully synced (no more tasks)"
opState := &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{},
},
},
Phase: common.OperationRunning,
}
// when
f.controller.appStateManager.SyncAppState(f.application, opState)
// then application sync should pass using the control plane service account
assert.Equal(t, common.OperationSucceeded, opState.Phase)
assert.Contains(t, opState.Message, opMessage)
})
}
func dig[T any](obj interface{}, path []interface{}) T {
i := obj

View File

@@ -32,23 +32,41 @@ function initializeVersionDropdown() {
window[callbackName] = function(response) {
const div = document.createElement('div');
div.innerHTML = response.html;
document.querySelector(".md-header__inner > .md-header__title").appendChild(div);
const headerTitle = document.querySelector(".md-header__inner > .md-header__title");
if (headerTitle) {
headerTitle.appendChild(div);
}
const container = div.querySelector('.rst-versions');
if (!container) return; // Exit if container not found
// Add caret icon
var caret = document.createElement('div');
caret.innerHTML = "<i class='fa fa-caret-down dropdown-caret'></i>";
caret.classList.add('dropdown-caret');
div.querySelector('.rst-current-version').appendChild(caret);
const currentVersionElem = div.querySelector('.rst-current-version');
if (currentVersionElem) {
currentVersionElem.appendChild(caret);
}
div.querySelector('.rst-current-version').addEventListener('click', function() {
container.classList.toggle('shift-up');
});
// Add click listener to toggle dropdown
if (currentVersionElem && container) {
currentVersionElem.addEventListener('click', function() {
container.classList.toggle('shift-up');
});
}
// Sorting Logic
sortVersionLinks(container);
};
// Load CSS
var CSSLink = document.createElement('link');
CSSLink.rel = 'stylesheet';
CSSLink.href = '/assets/versions.css';
document.getElementsByTagName('head')[0].appendChild(CSSLink);
// Load JSONP Script
var script = document.createElement('script');
const currentVersion = getCurrentVersion();
script.src = 'https://argo-cd.readthedocs.io/_/api/v2/footer_html/?' +
@@ -56,6 +74,58 @@ function initializeVersionDropdown() {
document.getElementsByTagName('head')[0].appendChild(script);
}
// Function to sort version links
function sortVersionLinks(container) {
// Find all <dl> elements within the container
const dlElements = container.querySelectorAll('dl');
dlElements.forEach(dl => {
const dt = dl.querySelector('dt');
if (dt && dt.textContent.trim().toLowerCase() === 'versions') {
// Found the Versions <dl>
const ddElements = Array.from(dl.querySelectorAll('dd'));
// Define sorting criteria
ddElements.sort((a, b) => {
const aText = a.textContent.trim().toLowerCase();
const bText = b.textContent.trim().toLowerCase();
// Prioritize 'latest' and 'stable'
if (aText === 'latest') return -1;
if (bText === 'latest') return 1;
if (aText === 'stable') return -1;
if (bText === 'stable') return 1;
// Extract version numbers (e.g., release-2.9)
const aVersionMatch = aText.match(/release-(\d+(\.\d+)*)/);
const bVersionMatch = bText.match(/release-(\d+(\.\d+)*)/);
if (aVersionMatch && bVersionMatch) {
const aVersion = aVersionMatch[1].split('.').map(Number);
const bVersion = bVersionMatch[1].split('.').map(Number);
for (let i = 0; i < Math.max(aVersion.length, bVersion.length); i++) {
const aNum = aVersion[i] || 0;
const bNum = bVersion[i] || 0;
if (aNum > bNum) return -1;
if (aNum < bNum) return 1;
}
return 0;
}
// Fallback to alphabetical order
return aText.localeCompare(bText);
});
// Remove existing <dd> elements
ddElements.forEach(dd => dl.removeChild(dd));
// Append sorted <dd> elements
ddElements.forEach(dd => dl.appendChild(dd));
}
});
}
// VERSION WARNINGS
window.addEventListener("DOMContentLoaded", function() {
var margin = 30;

View File

@@ -60,7 +60,38 @@ data:
server: https://some-cluster
```
Note: There is no need to restart Argo CD Server after modifiying the
Proxy extensions can also be provided individually using dedicated
Argo CD configmap keys for better GitOps operations. The example below
demonstrates how to configure the same hypothetical httpbin config
above using a dedicated key:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
namespace: argocd
data:
extension.config.httpbin: |
connectionTimeout: 2s
keepAlive: 15s
idleConnectionTimeout: 60s
maxIdleConnections: 30
services:
- url: http://httpbin.org
headers:
- name: some-header
value: '$some.argocd.secret.key'
cluster:
name: some-cluster
server: https://some-cluster
```
Attention: Extension names must be unique in the Argo CD configmap. If
duplicated keys are found, the Argo CD API server will log an error
message and no proxy extension will be registered.
Note: There is no need to restart Argo CD Server after modifying the
`extension.config` entry in Argo CD configmap. Changes will be
automatically applied. A new proxy registry will be built making
all new incoming extensions requests (`<argocd-host>/extensions/*`) to

View File

@@ -1,7 +1,7 @@
# Application Sync using impersonation
!!! warning "Alpha Feature"
This is an experimental, alpha-quality feature that allows you to control the service account used for the sync operation. The configured service account, could have lesser privileges required for creating resources compared to the highly privileged access required for the control plane operations.
This is an experimental, alpha-quality feature that allows you to control the service account used for the sync operation. The configured service account could have lesser privileges required for creating resources compared to the highly privileged access required for the control plane operations.
!!! warning
Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.
@@ -94,7 +94,7 @@ spec:
sourceRepos:
- '*'
destinations:
- *
- '*'
destinationServiceAccounts:
- server: https://kubernetes.default.svc
namespace: guestbook
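For orientation, here is a minimal sketch of a complete `AppProject` using `destinationServiceAccounts`, assuming the alpha impersonation feature has been enabled; the project name and service account name are illustrative and not taken from this diff:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: guestbook-project            # illustrative name
  namespace: argocd
spec:
  sourceRepos:
    - '*'
  destinations:
    - server: https://kubernetes.default.svc
      namespace: guestbook
  destinationServiceAccounts:
    - server: https://kubernetes.default.svc
      namespace: guestbook
      defaultServiceAccount: guestbook-deployer   # assumed service account in the guestbook namespace
```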

View File

@@ -42,6 +42,7 @@ When the ApplicationSet changes, the changes will be applied to each group of Ap
* Sync operations are triggered the same way as if they were triggered by the UI or CLI (by directly setting the `operation` status field on the Application resource). This means that a RollingSync will respect sync windows just as if a user had clicked the "Sync" button in the Argo UI.
* When a sync is triggered, the sync is performed with the same syncPolicy configured for the Application. For example, this preserves the Application's retry settings.
* If an Application is considered "Pending" for `applicationsetcontroller.default.application.progressing.timeout` seconds, the Application is automatically moved to Healthy status (default 300).
* If an Application is not selected in any step, it will be excluded from the rolling sync and needs to be manually synced through the CLI or UI.
#### Example
The following example illustrates how to stage a progressive sync over Applications with explicitly configured environment labels.
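The example itself falls outside this hunk. As a rough sketch only, a RollingSync strategy staged over an environment label could look like the following fragment of an ApplicationSet `spec`; the label key, values, and `maxUpdate` are illustrative:

```yaml
spec:
  strategy:
    type: RollingSync
    rollingSync:
      steps:
        - matchExpressions:           # stage 1: dev Applications first
            - key: envLabel
              operator: In
              values:
                - env-dev
        - matchExpressions:           # stage 2: prod Applications, a few at a time
            - key: envLabel
              operator: In
              values:
                - env-prod
          maxUpdate: 10%
```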

View File

@@ -283,6 +283,9 @@ data:
# - annotation+label : Also uses an annotation for tracking, but additionally labels the resource with the application name
application.resourceTrackingMethod: annotation
# Optional installation ID. Allows multiple installations of Argo CD in the same cluster.

installationID: "my-unique-id"
# disables admin user. Admin is enabled by default
admin.enabled: "false"
# add an additional local user with apiKey and login capabilities
@@ -329,14 +332,14 @@ data:
# spread out the refreshes and give time to the repo-server to catch up. The jitter is the maximum duration that can be
# added to the sync timeout. So, if the sync timeout is 3 minutes and the jitter is 1 minute, then the actual timeout will
# be between 3 and 4 minutes. Disabled when the value is 0, defaults to 0.
timeout.reconciliation.jitter: 0
timeout.reconciliation.jitter: "0"
# cluster.inClusterEnabled indicates whether to allow in-cluster server address. This is enabled by default.
cluster.inClusterEnabled: "true"
# The maximum number of pod logs to render in UI. If the application has more than this number of pods, the logs will not be rendered.
# This is to prevent the UI from becoming unresponsive when rendering a large number of logs. Default is 10.
server.maxPodLogsToRender: 10
server.maxPodLogsToRender: "10"
# Application pod logs RBAC enforcement enables control over who can and who can't view application pod logs.
# When you enable the switch, pod logs will be visible only to admin role by default. Other roles/users will not be able to view them via cli and UI.
@@ -425,7 +428,7 @@ data:
name: some-cluster
server: https://some-cluster
# The maximum size of the payload that can be sent to the webhook server.
webhook.maxPayloadSizeMB: 1024
webhook.maxPayloadSizeMB: "1024"
# application.sync.impersonation.enabled indicates whether the application sync can be decoupled from control plane service account using impersonation.
# application.sync.impersonation.enabled enables application sync to use a custom service account, via impersonation. This allows decoupling sync from control-plane service account.
application.sync.impersonation.enabled: "false"

View File

@@ -47,8 +47,11 @@ data:
controller.log.level: "info"
# Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
controller.metrics.cache.expiration: "24h0m0s"
# Specifies timeout between application self heal attempts (default 5)
controller.self.heal.timeout.seconds: "5"
# Specifies exponential backoff timeout parameters between application self heal attempts
controller.self.heal.timeout.seconds: "2"
controller.self.heal.backoff.factor: "3"
controller.self.heal.backoff.cap.seconds: "300"
# Cache expiration for app state (default 1h0m0s)
controller.app.state.cache.expiration: "1h0m0s"
# Specifies if resource health should be persisted in app CRD (default true)

View File

@@ -98,20 +98,27 @@ data:
return hs
```
In order to prevent duplication of the custom health check for potentially multiple resources, it is also possible to specify a wildcard in the resource kind, and anywhere in the resource group, like this:
In order to prevent duplication of custom health checks for potentially multiple resources, it is also possible to
specify a wildcard in the resource kind, and anywhere in the resource group, like this:
```yaml
resource.customizations.health.ec2.aws.crossplane.io_*: |
...
resource.customizations: |
ec2.aws.crossplane.io/*:
health.lua: |
...
```
```yaml
resource.customizations.health.*.aws.crossplane.io_*: |
...
# If a key _begins_ with a wildcard, please ensure that the GVK key is quoted.
resource.customizations: |
"*.aws.crossplane.io/*":
health.lua: |
...
```
!!!important
Please, note that there can be ambiguous resolution of wildcards, see [#16905](https://github.com/argoproj/argo-cd/issues/16905)
Please, note that wildcards are only supported when using the `resource.customizations` key, the `resource.customizations.health.<group>_<kind>`
style keys do not work since wildcards (`*`) are not supported in Kubernetes configmap keys.
The `obj` is a global variable which contains the resource. The script must return an object with status and optional message field.
The custom health check might return one of the following health statuses:
@@ -121,7 +128,7 @@ The custom health check might return one of the following health statuses:
* `Degraded` - the resource is degraded
* `Suspended` - the resource is suspended and waiting for some external event to resume (e.g. suspended CronJob or paused Deployment)
By default health typically returns `Progressing` status.
By default, health typically returns a `Progressing` status.
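To make the expected shape of such a script concrete, here is a minimal sketch of a wildcard health check; the group (`*.example.crossplane.io`) and the `Ready` condition it inspects are hypothetical, not taken from this diff:

```yaml
resource.customizations: |
  "*.example.crossplane.io/*":
    health.lua: |
      local hs = {}
      hs.status = "Progressing"
      hs.message = "Waiting for the resource to report Ready"
      -- obj is the global variable holding the resource being checked
      if obj.status ~= nil and obj.status.conditions ~= nil then
        for _, condition in ipairs(obj.status.conditions) do
          if condition.type == "Ready" and condition.status == "True" then
            hs.status = "Healthy"
            hs.message = condition.message
          end
        end
      end
      -- return a table with status and an optional message field
      return hs
```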
NOTE: As a security measure, access to the standard Lua libraries will be disabled by default. Admins can control access by
setting `resource.customizations.useOpenLibs.<group>_<kind>`. In the following example, standard libraries are enabled for health check of `cert-manager.io/Certificate`.

View File

@@ -471,7 +471,7 @@ Once we create this service, we can configure the Ingress to conditionally route
```
## [Istio](https://www.istio.io)
You can put Argo CD behind Istio using following configurations. Here we will achieve both serving Argo CD behind istio and using subpath on Istio
You can put Argo CD behind Istio using following configurations. Here we will achive both serving Argo CD behind istio and using subpath on Istio
First we need to make sure that we can run Argo CD with subpath (ie /argocd). For this we have used install.yaml from argocd project as is
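The actual manifests live outside this hunk. As a hedged sketch only, routing the `/argocd` subpath to the `argocd-server` Service with an Istio `VirtualService` might look like the following; the gateway name and port are assumptions, and the server itself still needs to be made aware of the subpath (for example via its `--rootpath` flag):

```yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: argocd
  namespace: argocd
spec:
  hosts:
    - '*'
  gateways:
    - argocd-gateway              # assumed Gateway name
  http:
    - match:
        - uri:
            prefix: /argocd       # the subpath mentioned above
      route:
        - destination:
            host: argocd-server
            port:
              number: 80          # assumed plain-HTTP port on the Service
```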

View File

@@ -40,27 +40,6 @@ You need to check your argocd-notifications controller version. For instance, th
You have not defined `xxxx` in `argocd-notifications-cm`, or the settings failed to parse.
### GitHub.repoURL (\u003cno value\u003e) does not have a / using the configuration
You probably have an Application with [multiple sources](https://argo-cd.readthedocs.io/en/stable/user-guide/multiple_sources/):
```yaml
spec:
sources: # <- multiple sources
- repoURL: https://github.com/exampleOrg/first.git
path: sources/example
- repoURL: https://github.com/exampleOrg/second.git
targetRevision: "{{branch}}"
```
So standard notification template won't work (`{{.app.spec.source.repoURL}}`). You should choose a single source instead:
```yaml
template.example: |
github:
repoURLPath: "{{ (index .app.spec.sources 0).repoURL }}"
```
## config referenced xxx, but key does not exist in secret
- If you are using a custom secret, check that the secret is in the same namespace

View File

@@ -66,7 +66,10 @@ argocd-application-controller [flags]
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
--self-heal-backoff-cap-seconds int Specifies max timeout of exponential backoff between application self heal attempts (default 300)
--self-heal-backoff-factor int Specifies factor of exponential timeout between application self heal attempts (default 3)
--self-heal-backoff-timeout-seconds int Specifies initial timeout of exponential backoff between self heal attempts (default 2)
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
--sentinelmaster string Redis sentinel master group name. (default "master")
--server string The address and port of the Kubernetes API server

View File

@@ -1,2 +1,5 @@
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
version.
| Argo CD version | Kubernetes versions |
|-----------------|---------------------|
| 2.13 | v1.30, v1.29, v1.28, v1.27 |
| 2.12 | v1.29, v1.28, v1.27, v1.26 |
| 2.11 | v1.29, v1.28, v1.27, v1.26, v1.25 |

View File

@@ -1,5 +1,9 @@
# v2.12 to 2.13
## Upgraded Helm Version
Note that the bundled Helm version has been upgraded from 3.15.2 to 3.15.4.
## Custom Resource Actions for Flux Resources
[`Custom Resource Actions`](../resource_actions.md#Custom-Resource-Actions) have been added for Flux Resources.
@@ -66,4 +70,4 @@ The default extension for log files generated by Argo CD when using the "Downloa
If you have any custom scripts or tools that depend on the `.txt` extension, please update them accordingly.
## Added proxy to kustomize
Proxy config set on repository credentials / repository templates is now passed down to the `kustomize build` command.
Proxy config set on repository credentials / repository templates is now passed down to the `kustomie build` command.
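For readers wondering where that proxy setting lives, a hedged sketch of a repository credential Secret with a proxy configured follows; the repository URL and proxy address are placeholders:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: private-repo
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  url: https://github.com/example/private-repo.git
  proxy: http://proxy.example.com:3128   # now also passed to `kustomize build`
```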

View File

@@ -68,9 +68,9 @@ This proposal would allow ArgoCD administrators to manage the cluster permission
### Goals
- Applications may only impersonate ServiceAccounts that live in the same namespace as the destination namespace configured in the application. If the service account is created in a different namespace, then the user can provide the service account name in the format `<namespace>:<service_account_name>`. The ServiceAccount to be used for syncing each application is determined by the target destination configured in the `AppProject` associated with the `Application`.
- If impersonation feature is enabled, and no service account name is provided in the associated `AppProject`, then the sync operation would fail with an appropriate error message. Users can configure a catch all service account matching all destinations to avoid such sync errors.
- If impersonation feature is enabled, and no service account name is provided in the associated `AppProject`, then the default service account of the destination namespace of the `Application` should be used.
- Access restrictions implemented through properties in AppProject (if configured) must retain their existing behavior. From a security standpoint, any restrictions that were available before switching to a service-account-based approach should continue to exist even when the impersonation feature is enabled.
- The feature can be enabled/disabled only at the system level. Once enabled/disabled, it is applicable to all ArgoCD `Applications`.
- The feature can be enabled/disabled only at the system level. Once enabled/disabled, it is applicable to all Argo CD `Applications`.
### Non-Goals
@@ -82,7 +82,7 @@ As part of this proposal, it would be possible for an ArgoCD Admin to specify a
When an application gets synced, based on its destination (target cluster and namespace combination), the `defaultServiceAccount` configured in the `AppProject` will be selected and used for impersonation when executing the kubectl commands for the sync operation.
We would be introducing a new element `destinationServiceAccounts` in `AppProject.spec`. This element is used for the sole purpose of specifying the impersonation configuration. The `defaultServiceAccount` configured for the `AppProject` would be used for the sync operation for a particular destination cluster and namespace. If impersonation feature is enabled and no specific service account is provided in the `AppProject` CR, then the sync operation will fail with an error. Users can configure a catch all service account matching all destinations to avoid such sync errors.
We would be introducing a new element `destinationServiceAccounts` in `AppProject.spec`. This element is used for the sole purpose of specifying the impersonation configuration. The `defaultServiceAccount` configured for the `AppProject` would be used for the sync operation for a particular destination cluster and namespace. If impersonation feature is enabled and no specific service account is provided in the `AppProject` CR, then the `default` service account in the destination namespace would be used for impersonation.
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -109,7 +109,7 @@ spec:
- server: https://kubernetes.default.svc
namespace: guestbook-stage
defaultServiceAccount: guestbook-stage-deployer
- server: '*
- server: '*'
namespace: '*'
defaultServiceAccount: default # catch all service account to be used when all other matches fail.
```
@@ -161,7 +161,10 @@ So that, I can use a generic convention of naming service accounts and avoid ass
#### Component: ArgoCD Application Controller
- Provide a configuration in `argocd-cm` which can be modified to enable the Impersonation feature. Set `application.sync.impersonation.enabled: "true"` in the Argo CD ConfigMap. Default value of `application.sync.impersonation.enabled` would be `"false"` and user has to explicitly override it to use this feature.
- Provide a configuration in `argocd-cm` which can be modified to enable the Impersonation feature. Set `applicationcontroller.enable.impersonation: true` in the Argo CD ConfigMap. Default value of `applicationcontroller.enable.impersonation` would be `false` and user has to explicitly override it to use this feature.
- Provide an option to override the Impersonation feature using environment variables.
Set `ARGOCD_APPLICATION_CONTROLLER_ENABLE_IMPERSONATION=true` in the Application controller environment variables. Default value of the environment variable must be `false` and user has to explicitly set it to `true` to use this feature.
- Provide an option to enable this feature using a command line flag `--enable-impersonation`. This new argument option needs to be added to the Application controller args.
- Fix Application Controller `sync.go` to set the Impersonate configuration from the AppProject CR to the `SyncContext` Object (rawConfig and restConfig field, need to understand which config is used for the actual sync and if both configs need to be impersonated.)
#### Component: ArgoCD UI

View File

@@ -14,11 +14,11 @@ recent minor releases.
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](master/argocd-test.html) | 0 | 0 | 1 | 0 |
| [ui/yarn.lock](master/argocd-test.html) | 0 | 0 | 2 | 0 |
| [ui/yarn.lock](master/argocd-test.html) | 0 | 0 | 1 | 0 |
| [dex:v2.41.1](master/ghcr.io_dexidp_dex_v2.41.1.html) | 0 | 0 | 0 | 1 |
| [haproxy:2.6.17-alpine](master/public.ecr.aws_docker_library_haproxy_2.6.17-alpine.html) | 0 | 0 | 2 | 3 |
| [redis:7.0.15-alpine](master/public.ecr.aws_docker_library_redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 4 | 8 |
| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 6 | 8 |
| [redis:7.0.15-alpine](master/redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [install.yaml](master/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](master/argocd-iac-namespace-install.html) | - | - | - | - |
@@ -28,37 +28,37 @@ recent minor releases.
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v2.12.3/argocd-test.html) | 0 | 0 | 2 | 0 |
| [ui/yarn.lock](v2.12.3/argocd-test.html) | 0 | 0 | 2 | 0 |
| [ui/yarn.lock](v2.12.3/argocd-test.html) | 0 | 0 | 1 | 0 |
| [dex:v2.38.0](v2.12.3/ghcr.io_dexidp_dex_v2.38.0.html) | 0 | 0 | 6 | 6 |
| [haproxy:2.6.17-alpine](v2.12.3/public.ecr.aws_docker_library_haproxy_2.6.17-alpine.html) | 0 | 0 | 2 | 3 |
| [redis:7.0.15-alpine](v2.12.3/public.ecr.aws_docker_library_redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [argocd:v2.12.3](v2.12.3/quay.io_argoproj_argocd_v2.12.3.html) | 0 | 0 | 8 | 8 |
| [argocd:v2.12.3](v2.12.3/quay.io_argoproj_argocd_v2.12.3.html) | 0 | 0 | 7 | 8 |
| [redis:7.0.15-alpine](v2.12.3/redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [install.yaml](v2.12.3/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v2.12.3/argocd-iac-namespace-install.html) | - | - | - | - |
### v2.11.8
### v2.11.7
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v2.11.8/argocd-test.html) | 0 | 1 | 3 | 0 |
| [ui/yarn.lock](v2.11.8/argocd-test.html) | 0 | 0 | 2 | 0 |
| [dex:v2.38.0](v2.11.8/ghcr.io_dexidp_dex_v2.38.0.html) | 0 | 0 | 6 | 6 |
| [haproxy:2.6.14-alpine](v2.11.8/haproxy_2.6.14-alpine.html) | 0 | 1 | 7 | 6 |
| [argocd:v2.11.8](v2.11.8/quay.io_argoproj_argocd_v2.11.8.html) | 0 | 0 | 7 | 16 |
| [redis:7.0.15-alpine](v2.11.8/redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [install.yaml](v2.11.8/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v2.11.8/argocd-iac-namespace-install.html) | - | - | - | - |
| [go.mod](v2.11.7/argocd-test.html) | 0 | 1 | 3 | 0 |
| [ui/yarn.lock](v2.11.7/argocd-test.html) | 0 | 0 | 1 | 0 |
| [dex:v2.38.0](v2.11.7/ghcr.io_dexidp_dex_v2.38.0.html) | 0 | 0 | 6 | 6 |
| [haproxy:2.6.14-alpine](v2.11.7/haproxy_2.6.14-alpine.html) | 0 | 1 | 7 | 6 |
| [argocd:v2.11.7](v2.11.7/quay.io_argoproj_argocd_v2.11.7.html) | 0 | 0 | 10 | 20 |
| [redis:7.0.15-alpine](v2.11.7/redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [install.yaml](v2.11.7/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v2.11.7/argocd-iac-namespace-install.html) | - | - | - | - |
### v2.10.16
| | Critical | High | Medium | Low |
|---:|:--------:|:----:|:------:|:---:|
| [go.mod](v2.10.16/argocd-test.html) | 0 | 1 | 4 | 0 |
| [ui/yarn.lock](v2.10.16/argocd-test.html) | 0 | 0 | 2 | 0 |
| [ui/yarn.lock](v2.10.16/argocd-test.html) | 0 | 0 | 1 | 0 |
| [dex:v2.37.0](v2.10.16/ghcr.io_dexidp_dex_v2.37.0.html) | 1 | 1 | 10 | 6 |
| [haproxy:2.6.14-alpine](v2.10.16/haproxy_2.6.14-alpine.html) | 0 | 1 | 7 | 6 |
| [argocd:v2.10.16](v2.10.16/quay.io_argoproj_argocd_v2.10.16.html) | 0 | 0 | 11 | 20 |
| [argocd:v2.10.16](v2.10.16/quay.io_argoproj_argocd_v2.10.16.html) | 0 | 0 | 10 | 20 |
| [redis:7.0.15-alpine](v2.10.16/redis_7.0.15-alpine.html) | 0 | 0 | 0 | 0 |
| [install.yaml](v2.10.16/argocd-iac-install.html) | - | - | - | - |
| [namespace-install.yaml](v2.10.16/argocd-iac-namespace-install.html) | - | - | - | - |

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:20:57 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:20:48 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:21:06 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:20:58 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="3 known vulnerabilities found in 5 vulnerable dependency paths.">
<meta name="description" content="2 known vulnerabilities found in 2 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:18:53 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:18:34 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -467,8 +467,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>3</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>5 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>2 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2132</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -477,173 +477,6 @@
<div class="layout-container" style="padding-top: 35px;">
<div class="cards--vuln filter--patch filter--ignore">
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Regular Expression Denial of Service (ReDoS)</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: /argo-cd <span class="list-paths__item__arrow"></span> ui/yarn.lock
</li>
<li class="card__meta__item">
Package Manager: npm
</li>
<li class="card__meta__item">
Vulnerable module:
path-to-regexp
</li>
<li class="card__meta__item">Introduced through:
argo-cd-ui@1.0.0, react-router@4.3.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
argo-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="overview">Overview</h2>
<p>Affected versions of this package are vulnerable to Regular Expression Denial of Service (ReDoS) when including multiple regular expression parameters in a single segment, which will produce the regular expression <code>/^\/([^\/]+?)-([^\/]+?)\/?$/</code>, if two parameters within a single segment are separated by a character other than a <code>/</code> or <code>.</code>. Poor performance will block the event loop and can lead to a DoS.</p>
<p><strong>Note:</strong>
Version 0.1.10 is patched to mitigate this but is also vulnerable if custom regular expressions are used. Due to the existence of this attack vector, the Snyk security team have decided to err on the side of caution in considering the very widely-used v0 branch vulnerable, while the 8.0.0 release has completely eliminated the vulnerable functionality.</p>
<h2 id="workaround">Workaround</h2>
<p>This vulnerability can be avoided by using a custom regular expression for parameters after the first in a segment, which excludes <code>-</code> and <code>/</code>.</p>
<h2 id="poc">PoC</h2>
<pre><code class="language-js">/a${&#39;-a&#39;.repeat(8_000)}/a
</code></pre>
<h2 id="details">Details</h2>
<p>Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its original and legitimate users. There are many types of DoS attacks, ranging from trying to clog the network pipes to the system by generating a large volume of traffic from many machines (a Distributed Denial of Service - DDoS - attack) to sending crafted requests that cause a system to crash or take a disproportional amount of time to process.</p>
<p>The Regular expression Denial of Service (ReDoS) is a type of Denial of Service attack. Regular expressions are incredibly powerful, but they aren&#39;t very intuitive and can ultimately end up making it easy for attackers to take your site down.</p>
<p>Lets take the following regular expression as an example:</p>
<pre><code class="language-js">regex = /A(B|C+)+D/
</code></pre>
<p>This regular expression accomplishes the following:</p>
<ul>
<li><code>A</code> The string must start with the letter &#39;A&#39;</li>
<li><code>(B|C+)+</code> The string must then follow the letter A with either the letter &#39;B&#39; or some number of occurrences of the letter &#39;C&#39; (the <code>+</code> matches one or more times). The <code>+</code> at the end of this section states that we can look for one or more matches of this section.</li>
<li><code>D</code> Finally, we ensure this section of the string ends with a &#39;D&#39;</li>
</ul>
<p>The expression would match inputs such as <code>ABBD</code>, <code>ABCCCCD</code>, <code>ABCBCCCD</code> and <code>ACCCCCD</code></p>
<p>It most cases, it doesn&#39;t take very long for a regex engine to find a match:</p>
<pre><code class="language-bash">$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCD&quot;)&#39;
0.04s user 0.01s system 95% cpu 0.052 total
$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCX&quot;)&#39;
1.79s user 0.02s system 99% cpu 1.812 total
</code></pre>
<p>The entire process of testing it against a 30 characters long string takes around ~52ms. But when given an invalid string, it takes nearly two seconds to complete the test, over ten times as long as it took to test a valid string. The dramatic difference is due to the way regular expressions get evaluated.</p>
<p>Most Regex engines will work very similarly (with minor differences). The engine will match the first possible way to accept the current character and proceed to the next one. If it then fails to match the next one, it will backtrack and see if there was another way to digest the previous character. If it goes too far down the rabbit hole only to find out the string doesnt match in the end, and if many characters have multiple valid regex paths, the number of backtracking steps can become very large, resulting in what is known as <em>catastrophic backtracking</em>.</p>
<p>Let&#39;s look at how our expression runs into this problem, using a shorter string: &quot;ACCCX&quot;. While it seems fairly straightforward, there are still four different ways that the engine could match those three C&#39;s:</p>
<ol>
<li>CCC</li>
<li>CC+C</li>
<li>C+CC</li>
<li>C+C+C.</li>
</ol>
<p>The engine has to try each of those combinations to see if any of them potentially match against the expression. When you combine that with the other steps the engine must take, we can use <a href="https://regex101.com/debugger">RegEx 101 debugger</a> to see the engine has to take a total of 38 steps before it can determine the string doesn&#39;t match.</p>
<p>From there, the number of steps the engine must use to validate a string just continues to grow.</p>
<table>
<thead>
<tr>
<th>String</th>
<th align="right">Number of C&#39;s</th>
<th align="right">Number of steps</th>
</tr>
</thead>
<tbody><tr>
<td>ACCCX</td>
<td align="right">3</td>
<td align="right">38</td>
</tr>
<tr>
<td>ACCCCX</td>
<td align="right">4</td>
<td align="right">71</td>
</tr>
<tr>
<td>ACCCCCX</td>
<td align="right">5</td>
<td align="right">136</td>
</tr>
<tr>
<td>ACCCCCCCCCCCCCCX</td>
<td align="right">14</td>
<td align="right">65,553</td>
</tr>
</tbody></table>
<p>By the time the string includes 14 C&#39;s, the engine has to take over 65,000 steps just to see if the string is valid. These extreme situations can cause them to work very slowly (exponentially related to input size, as shown above), allowing an attacker to exploit this and can cause the service to excessively consume CPU, resulting in a Denial of Service.</p>
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>path-to-regexp</code> to version 8.0.0 or higher.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/29b96b4a1de52824e1ca0f49a701183cc4ed476f">GitHub Commit</a></li>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/60f2121e9b66b7b622cc01080df0aabda9eedee6">GitHub Commit</a></li>
<li><a href="https://blakeembrey.com/posts/2024-09-web-redos/">Vulnerability Write-up</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-JS-PATHTOREGEXP-7925106">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Concurrent Execution using Shared Resource with Improper Synchronization (&#x27;Race Condition&#x27;)</h2>
<div class="card__section">

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:19:03 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:18:44 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:19:08 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:18:52 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:19:12 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:18:59 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="12 known vulnerabilities found in 66 vulnerable dependency paths.">
<meta name="description" content="14 known vulnerabilities found in 68 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:19:30 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:19:18 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -470,8 +470,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>12</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>66 vulnerable dependency paths</span></div>
<div class="meta-count"><span>14</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>68 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2355</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -1050,7 +1050,7 @@
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">CVE-2024-8096</h2>
<h2 class="card__title">Integer Overflow or Wraparound</h2>
<div class="card__section">
<div class="label label--medium">
@@ -1069,7 +1069,7 @@
<li class="card__meta__item">
Vulnerable module:
curl/libcurl3t64-gnutls
expat/libexpat1
</li>
<li class="card__meta__item">Introduced through:
@@ -1091,7 +1091,7 @@
<span class="list-paths__item__arrow"></span>
git@1:2.43.0-1ubuntu7.1
<span class="list-paths__item__arrow"></span>
curl/libcurl3t64-gnutls@8.5.0-2ubuntu10.3
expat/libexpat1@2.6.1-2build1
</span>
@@ -1103,23 +1103,168 @@
<hr/>
<!-- Overview -->
<h2 id="nvd-description">NVD Description</h2>
<p><strong><em>Note:</em></strong> <em>Versions mentioned in the description apply only to the upstream <code>curl</code> package and not the <code>curl</code> package as distributed by <code>Ubuntu</code>.</em>
<p><strong><em>Note:</em></strong> <em>Versions mentioned in the description apply only to the upstream <code>expat</code> package and not the <code>expat</code> package as distributed by <code>Ubuntu</code>.</em>
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>When curl is told to use the Certificate Status Request TLS extension, often referred to as OCSP stapling, to verify that the server certificate is valid, it might fail to detect some OCSP problems and instead wrongly consider the response as fine. If the returned status reports another error than &#39;revoked&#39; (like for example &#39;unauthorized&#39;) it is not treated as a bad certficate.</p>
<p>An issue was discovered in libexpat before 2.6.3. dtdCopy in xmlparse.c can have an integer overflow for nDefaultAtts on 32-bit platforms (where UINT_MAX equals SIZE_MAX).</p>
<h2 id="remediation">Remediation</h2>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>curl</code>.</p>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>expat</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-8096">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-8096</a></li>
<li><a href="https://curl.se/docs/CVE-2024-8096.html">https://curl.se/docs/CVE-2024-8096.html</a></li>
<li><a href="https://curl.se/docs/CVE-2024-8096.json">https://curl.se/docs/CVE-2024-8096.json</a></li>
<li><a href="https://hackerone.com/reports/2669852">https://hackerone.com/reports/2669852</a></li>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45491">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45491</a></li>
<li><a href="https://github.com/libexpat/libexpat/issues/888">https://github.com/libexpat/libexpat/issues/888</a></li>
<li><a href="https://github.com/libexpat/libexpat/pull/891">https://github.com/libexpat/libexpat/pull/891</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2404-CURL-7931919">More about this vulnerability</a></p>
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2404-EXPAT-7885392">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">XML External Entity (XXE) Injection</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: quay.io/argoproj/argocd:latest/argoproj/argocd <span class="list-paths__item__arrow"></span> Dockerfile
</li>
<li class="card__meta__item">
Package Manager: ubuntu:24.04
</li>
<li class="card__meta__item">
Vulnerable module:
expat/libexpat1
</li>
<li class="card__meta__item">Introduced through:
docker-image|quay.io/argoproj/argocd@latest, git@1:2.43.0-1ubuntu7.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
docker-image|quay.io/argoproj/argocd@latest
<span class="list-paths__item__arrow"></span>
git@1:2.43.0-1ubuntu7.1
<span class="list-paths__item__arrow"></span>
expat/libexpat1@2.6.1-2build1
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="nvd-description">NVD Description</h2>
<p><strong><em>Note:</em></strong> <em>Versions mentioned in the description apply only to the upstream <code>expat</code> package and not the <code>expat</code> package as distributed by <code>Ubuntu</code>.</em>
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>An issue was discovered in libexpat before 2.6.3. xmlparse.c does not reject a negative length for XML_ParseBuffer.</p>
<h2 id="remediation">Remediation</h2>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>expat</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45490">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45490</a></li>
<li><a href="https://github.com/libexpat/libexpat/issues/887">https://github.com/libexpat/libexpat/issues/887</a></li>
<li><a href="https://github.com/libexpat/libexpat/pull/890">https://github.com/libexpat/libexpat/pull/890</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2404-EXPAT-7885502">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Integer Overflow or Wraparound</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: quay.io/argoproj/argocd:latest/argoproj/argocd <span class="list-paths__item__arrow"></span> Dockerfile
</li>
<li class="card__meta__item">
Package Manager: ubuntu:24.04
</li>
<li class="card__meta__item">
Vulnerable module:
expat/libexpat1
</li>
<li class="card__meta__item">Introduced through:
docker-image|quay.io/argoproj/argocd@latest, git@1:2.43.0-1ubuntu7.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
docker-image|quay.io/argoproj/argocd@latest
<span class="list-paths__item__arrow"></span>
git@1:2.43.0-1ubuntu7.1
<span class="list-paths__item__arrow"></span>
expat/libexpat1@2.6.1-2build1
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="nvd-description">NVD Description</h2>
<p><strong><em>Note:</em></strong> <em>Versions mentioned in the description apply only to the upstream <code>expat</code> package and not the <code>expat</code> package as distributed by <code>Ubuntu</code>.</em>
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>An issue was discovered in libexpat before 2.6.3. nextScaffoldPart in xmlparse.c can have an integer overflow for m_groupSize on 32-bit platforms (where UINT_MAX equals SIZE_MAX).</p>
<h2 id="remediation">Remediation</h2>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>expat</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45492">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45492</a></li>
<li><a href="https://github.com/libexpat/libexpat/issues/889">https://github.com/libexpat/libexpat/issues/889</a></li>
<li><a href="https://github.com/libexpat/libexpat/pull/892">https://github.com/libexpat/libexpat/pull/892</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2404-EXPAT-7885595">More about this vulnerability</a></p>
</div>
</div><!-- .card -->

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:19:34 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:19:22 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:27:49 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:28:10 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:27:58 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:28:21 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="7 known vulnerabilities found in 163 vulnerable dependency paths.">
<meta name="description" content="6 known vulnerabilities found in 160 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:25:54 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:26:11 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -467,8 +467,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>7</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>163 vulnerable dependency paths</span></div>
<div class="meta-count"><span>6</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>160 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2042</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -3139,173 +3139,6 @@
<p><a href="https://snyk.io/vuln/SNYK-GOLANG-GOLANGORGXNETHTTP2-6531285">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Regular Expression Denial of Service (ReDoS)</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: /argo-cd <span class="list-paths__item__arrow"></span> ui/yarn.lock
</li>
<li class="card__meta__item">
Package Manager: npm
</li>
<li class="card__meta__item">
Vulnerable module:
path-to-regexp
</li>
<li class="card__meta__item">Introduced through:
argo-cd-ui@1.0.0, react-router@4.3.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
argo-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="overview">Overview</h2>
<p>Affected versions of this package are vulnerable to Regular Expression Denial of Service (ReDoS) when including multiple regular expression parameters in a single segment, which will produce the regular expression <code>/^\/([^\/]+?)-([^\/]+?)\/?$/</code>, if two parameters within a single segment are separated by a character other than a <code>/</code> or <code>.</code>. Poor performance will block the event loop and can lead to a DoS.</p>
<p><strong>Note:</strong>
Version 0.1.10 is patched to mitigate this but is also vulnerable if custom regular expressions are used. Due to the existence of this attack vector, the Snyk security team have decided to err on the side of caution in considering the very widely-used v0 branch vulnerable, while the 8.0.0 release has completely eliminated the vulnerable functionality.</p>
<h2 id="workaround">Workaround</h2>
<p>This vulnerability can be avoided by using a custom regular expression for parameters after the first in a segment, which excludes <code>-</code> and <code>/</code>.</p>
<h2 id="poc">PoC</h2>
<pre><code class="language-js">/a${&#39;-a&#39;.repeat(8_000)}/a
</code></pre>
<h2 id="details">Details</h2>
<p>Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its original and legitimate users. There are many types of DoS attacks, ranging from trying to clog the network pipes to the system by generating a large volume of traffic from many machines (a Distributed Denial of Service - DDoS - attack) to sending crafted requests that cause a system to crash or take a disproportional amount of time to process.</p>
<p>The Regular expression Denial of Service (ReDoS) is a type of Denial of Service attack. Regular expressions are incredibly powerful, but they aren&#39;t very intuitive and can ultimately end up making it easy for attackers to take your site down.</p>
<p>Lets take the following regular expression as an example:</p>
<pre><code class="language-js">regex = /A(B|C+)+D/
</code></pre>
<p>This regular expression accomplishes the following:</p>
<ul>
<li><code>A</code> The string must start with the letter &#39;A&#39;</li>
<li><code>(B|C+)+</code> The string must then follow the letter A with either the letter &#39;B&#39; or some number of occurrences of the letter &#39;C&#39; (the <code>+</code> matches one or more times). The <code>+</code> at the end of this section states that we can look for one or more matches of this section.</li>
<li><code>D</code> Finally, we ensure this section of the string ends with a &#39;D&#39;</li>
</ul>
<p>The expression would match inputs such as <code>ABBD</code>, <code>ABCCCCD</code>, <code>ABCBCCCD</code> and <code>ACCCCCD</code></p>
<p>It most cases, it doesn&#39;t take very long for a regex engine to find a match:</p>
<pre><code class="language-bash">$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCD&quot;)&#39;
0.04s user 0.01s system 95% cpu 0.052 total
$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCX&quot;)&#39;
1.79s user 0.02s system 99% cpu 1.812 total
</code></pre>
<p>The entire process of testing it against a 30 characters long string takes around ~52ms. But when given an invalid string, it takes nearly two seconds to complete the test, over ten times as long as it took to test a valid string. The dramatic difference is due to the way regular expressions get evaluated.</p>
<p>Most Regex engines will work very similarly (with minor differences). The engine will match the first possible way to accept the current character and proceed to the next one. If it then fails to match the next one, it will backtrack and see if there was another way to digest the previous character. If it goes too far down the rabbit hole only to find out the string doesnt match in the end, and if many characters have multiple valid regex paths, the number of backtracking steps can become very large, resulting in what is known as <em>catastrophic backtracking</em>.</p>
<p>Let&#39;s look at how our expression runs into this problem, using a shorter string: &quot;ACCCX&quot;. While it seems fairly straightforward, there are still four different ways that the engine could match those three C&#39;s:</p>
<ol>
<li>CCC</li>
<li>CC+C</li>
<li>C+CC</li>
<li>C+C+C.</li>
</ol>
<p>The engine has to try each of those combinations to see if any of them potentially match against the expression. When you combine that with the other steps the engine must take, we can use <a href="https://regex101.com/debugger">RegEx 101 debugger</a> to see the engine has to take a total of 38 steps before it can determine the string doesn&#39;t match.</p>
<p>From there, the number of steps the engine must use to validate a string just continues to grow.</p>
<table>
<thead>
<tr>
<th>String</th>
<th align="right">Number of C&#39;s</th>
<th align="right">Number of steps</th>
</tr>
</thead>
<tbody><tr>
<td>ACCCX</td>
<td align="right">3</td>
<td align="right">38</td>
</tr>
<tr>
<td>ACCCCX</td>
<td align="right">4</td>
<td align="right">71</td>
</tr>
<tr>
<td>ACCCCCX</td>
<td align="right">5</td>
<td align="right">136</td>
</tr>
<tr>
<td>ACCCCCCCCCCCCCCX</td>
<td align="right">14</td>
<td align="right">65,553</td>
</tr>
</tbody></table>
<p>By the time the string includes 14 C&#39;s, the engine has to take over 65,000 steps just to see if the string is valid. These extreme situations can cause them to work very slowly (exponentially related to input size, as shown above), allowing an attacker to exploit this and can cause the service to excessively consume CPU, resulting in a Denial of Service.</p>
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>path-to-regexp</code> to version 8.0.0 or higher.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/29b96b4a1de52824e1ca0f49a701183cc4ed476f">GitHub Commit</a></li>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/60f2121e9b66b7b622cc01080df0aabda9eedee6">GitHub Commit</a></li>
<li><a href="https://blakeembrey.com/posts/2024-09-web-redos/">Vulnerability Write-up</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-JS-PATHTOREGEXP-7925106">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Regular Expression Denial of Service (ReDoS)</h2>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:26:01 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:26:21 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:26:05 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:26:26 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="34 known vulnerabilities found in 236 vulnerable dependency paths.">
<meta name="description" content="33 known vulnerabilities found in 235 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:26:23 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:26:46 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -470,8 +470,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>34</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>236 vulnerable dependency paths</span></div>
<div class="meta-count"><span>33</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>235 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2278</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -2286,80 +2286,6 @@
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2204-CURL-7575743">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">CVE-2024-8096</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: quay.io/argoproj/argocd:v2.10.16/argoproj/argocd <span class="list-paths__item__arrow"></span> Dockerfile
</li>
<li class="card__meta__item">
Package Manager: ubuntu:22.04
</li>
<li class="card__meta__item">
Vulnerable module:
curl/libcurl3-gnutls
</li>
<li class="card__meta__item">Introduced through:
docker-image|quay.io/argoproj/argocd@v2.10.16, git@1:2.34.1-1ubuntu1.11 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
docker-image|quay.io/argoproj/argocd@v2.10.16
<span class="list-paths__item__arrow"></span>
git@1:2.34.1-1ubuntu1.11
<span class="list-paths__item__arrow"></span>
curl/libcurl3-gnutls@7.81.0-1ubuntu1.16
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="nvd-description">NVD Description</h2>
<p><strong><em>Note:</em></strong> <em>Versions mentioned in the description apply only to the upstream <code>curl</code> package and not the <code>curl</code> package as distributed by <code>Ubuntu</code>.</em>
<em>See <code>How to fix?</code> for <code>Ubuntu:22.04</code> relevant fixed versions and status.</em></p>
<p>When curl is told to use the Certificate Status Request TLS extension, often referred to as OCSP stapling, to verify that the server certificate is valid, it might fail to detect some OCSP problems and instead wrongly consider the response as fine. If the returned status reports another error than &#39;revoked&#39; (like for example &#39;unauthorized&#39;) it is not treated as a bad certficate.</p>
<h2 id="remediation">Remediation</h2>
<p>There is no fixed version for <code>Ubuntu:22.04</code> <code>curl</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-8096">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-8096</a></li>
<li><a href="https://curl.se/docs/CVE-2024-8096.html">https://curl.se/docs/CVE-2024-8096.html</a></li>
<li><a href="https://curl.se/docs/CVE-2024-8096.json">https://curl.se/docs/CVE-2024-8096.json</a></li>
<li><a href="https://hackerone.com/reports/2669852">https://hackerone.com/reports/2669852</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2204-CURL-7931918">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--low" data-snyk-test="low">
<h2 class="card__title">CVE-2023-7008</h2>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:26:27 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:26:51 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:25:35 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:25:50 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:25:43 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:26:00 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="6 known vulnerabilities found in 157 vulnerable dependency paths.">
<meta name="description" content="5 known vulnerabilities found in 154 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:23:42 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:23:52 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -467,8 +467,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>6</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>157 vulnerable dependency paths</span></div>
<div class="meta-count"><span>5</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>154 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2041</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -3139,173 +3139,6 @@
<p><a href="https://snyk.io/vuln/SNYK-GOLANG-GOLANGORGXNETHTTP2-6531285">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Regular Expression Denial of Service (ReDoS)</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: /argo-cd <span class="list-paths__item__arrow"></span> ui/yarn.lock
</li>
<li class="card__meta__item">
Package Manager: npm
</li>
<li class="card__meta__item">
Vulnerable module:
path-to-regexp
</li>
<li class="card__meta__item">Introduced through:
argo-cd-ui@1.0.0, react-router@4.3.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
argo-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="overview">Overview</h2>
<p>Affected versions of this package are vulnerable to Regular Expression Denial of Service (ReDoS) when multiple regular expression parameters are used in a single segment and are separated by a character other than a <code>/</code> or <code>.</code>, which produces a regular expression such as <code>/^\/([^\/]+?)-([^\/]+?)\/?$/</code>. The resulting poor performance blocks the event loop and can lead to a DoS.</p>
<p><strong>Note:</strong>
Version 0.1.10 is patched to mitigate this but is also vulnerable if custom regular expressions are used. Due to the existence of this attack vector, the Snyk security team have decided to err on the side of caution in considering the very widely-used v0 branch vulnerable, while the 8.0.0 release has completely eliminated the vulnerable functionality.</p>
<h2 id="workaround">Workaround</h2>
<p>This vulnerability can be avoided by using a custom regular expression for parameters after the first in a segment, which excludes <code>-</code> and <code>/</code>.</p>
<h2 id="poc">PoC</h2>
<pre><code class="language-js">/a${&#39;-a&#39;.repeat(8_000)}/a
</code></pre>
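<p>A minimal sketch that ties the workaround and PoC above together; it assumes the <code>path-to-regexp</code> 1.x API pulled in through <code>react-router@4.3.1</code>, and the route and parameter names are made up:</p>
<pre><code class="language-js">// Sketch, assuming the path-to-regexp 1.x API (the version used via react-router@4.3.1).
const pathToRegexp = require(&#39;path-to-regexp&#39;);

// Two parameters in one segment separated by &#39;-&#39; compile to the
// backtracking-prone expression quoted in the Overview above.
const vulnerable = pathToRegexp(&#39;/:attr1-:attr2&#39;);

// The PoC input: a long run of &#39;-a&#39; ending in a suffix that can never match.
const payload = `/a${&#39;-a&#39;.repeat(8_000)}/a`;

console.time(&#39;vulnerable&#39;);
vulnerable.test(payload); // blocks the event loop while the engine backtracks
console.timeEnd(&#39;vulnerable&#39;);

// Workaround: give the second parameter a custom pattern that excludes &#39;-&#39; and &#39;/&#39;,
// so the two capture groups can no longer trade characters back and forth.
const patched = pathToRegexp(&#39;/:attr1-:attr2([^-/]+)&#39;);

console.time(&#39;patched&#39;);
patched.test(payload); // fails quickly
console.timeEnd(&#39;patched&#39;);
</code></pre>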
<h2 id="details">Details</h2>
<p>Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its original and legitimate users. There are many types of DoS attacks, ranging from trying to clog the network pipes to the system by generating a large volume of traffic from many machines (a Distributed Denial of Service - DDoS - attack) to sending crafted requests that cause a system to crash or take a disproportionate amount of time to process.</p>
<p>The Regular expression Denial of Service (ReDoS) is a type of Denial of Service attack. Regular expressions are incredibly powerful, but they aren&#39;t very intuitive and can ultimately end up making it easy for attackers to take your site down.</p>
<p>Let&#39;s take the following regular expression as an example:</p>
<pre><code class="language-js">regex = /A(B|C+)+D/
</code></pre>
<p>This regular expression accomplishes the following:</p>
<ul>
<li><code>A</code> The string must start with the letter &#39;A&#39;</li>
<li><code>(B|C+)+</code> The string must then follow the letter A with either the letter &#39;B&#39; or some number of occurrences of the letter &#39;C&#39; (the <code>+</code> matches one or more times). The <code>+</code> at the end of this section states that we can look for one or more matches of this section.</li>
<li><code>D</code> Finally, we ensure this section of the string ends with a &#39;D&#39;</li>
</ul>
<p>The expression would match inputs such as <code>ABBD</code>, <code>ABCCCCD</code>, <code>ABCBCCCD</code> and <code>ACCCCCD</code></p>
<p>In most cases, it doesn&#39;t take very long for a regex engine to find a match:</p>
<pre><code class="language-bash">$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCD&quot;)&#39;
0.04s user 0.01s system 95% cpu 0.052 total
$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCX&quot;)&#39;
1.79s user 0.02s system 99% cpu 1.812 total
</code></pre>
<p>The entire process of testing it against a 30-character string takes around 52ms. But when given an invalid string, it takes nearly two seconds to complete the test, more than thirty times as long as it took to test a valid string. The dramatic difference is due to the way regular expressions get evaluated.</p>
<p>Most regex engines work very similarly (with minor differences). The engine will match the first possible way to accept the current character and proceed to the next one. If it then fails to match the next one, it will backtrack and see if there was another way to digest the previous character. If it goes too far down the rabbit hole only to find out the string doesn&#39;t match in the end, and if many characters have multiple valid regex paths, the number of backtracking steps can become very large, resulting in what is known as <em>catastrophic backtracking</em>.</p>
<p>Let&#39;s look at how our expression runs into this problem, using a shorter string: &quot;ACCCX&quot;. While it seems fairly straightforward, there are still four different ways that the engine could match those three C&#39;s:</p>
<ol>
<li>CCC</li>
<li>CC+C</li>
<li>C+CC</li>
<li>C+C+C.</li>
</ol>
<p>The engine has to try each of those combinations to see if any of them potentially match against the expression. When you combine that with the other steps the engine must take, we can use the <a href="https://regex101.com/debugger">RegEx 101 debugger</a> to see that the engine has to take a total of 38 steps before it can determine the string doesn&#39;t match.</p>
<p>From there, the number of steps the engine must use to validate a string just continues to grow.</p>
<table>
<thead>
<tr>
<th>String</th>
<th align="right">Number of C&#39;s</th>
<th align="right">Number of steps</th>
</tr>
</thead>
<tbody><tr>
<td>ACCCX</td>
<td align="right">3</td>
<td align="right">38</td>
</tr>
<tr>
<td>ACCCCX</td>
<td align="right">4</td>
<td align="right">71</td>
</tr>
<tr>
<td>ACCCCCX</td>
<td align="right">5</td>
<td align="right">136</td>
</tr>
<tr>
<td>ACCCCCCCCCCCCCCX</td>
<td align="right">14</td>
<td align="right">65,553</td>
</tr>
</tbody></table>
<p>By the time the string includes 14 C&#39;s, the engine has to take over 65,000 steps just to see if the string is valid. These extreme situations can cause the engine to work very slowly (exponentially related to input size, as shown above), allowing an attacker to exploit this behavior and cause the service to consume excessive CPU, resulting in a Denial of Service.</p>
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>path-to-regexp</code> to version 8.0.0 or higher.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/29b96b4a1de52824e1ca0f49a701183cc4ed476f">GitHub Commit</a></li>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/60f2121e9b66b7b622cc01080df0aabda9eedee6">GitHub Commit</a></li>
<li><a href="https://blakeembrey.com/posts/2024-09-web-redos/">Vulnerability Write-up</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-JS-PATHTOREGEXP-7925106">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Denial of Service (DoS)</h2>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:23:48 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:23:58 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:23:53 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:24:05 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:24:16 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:24:29 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:23:20 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:23:27 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:23:29 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:23:37 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="4 known vulnerabilities found in 6 vulnerable dependency paths.">
<meta name="description" content="3 known vulnerabilities found in 3 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:21:28 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:21:25 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -467,8 +467,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>4</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>6 vulnerable dependency paths</span></div>
<div class="meta-count"><span>3</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>3 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2061</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -477,173 +477,6 @@
<div class="layout-container" style="padding-top: 35px;">
<div class="cards--vuln filter--patch filter--ignore">
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Regular Expression Denial of Service (ReDoS)</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: /argo-cd <span class="list-paths__item__arrow"></span> ui/yarn.lock
</li>
<li class="card__meta__item">
Package Manager: npm
</li>
<li class="card__meta__item">
Vulnerable module:
path-to-regexp
</li>
<li class="card__meta__item">Introduced through:
argo-cd-ui@1.0.0, react-router@4.3.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
argo-cd-ui@1.0.0
<span class="list-paths__item__arrow"></span>
argo-ui@1.0.0
<span class="list-paths__item__arrow"></span>
react-router-dom@4.3.1
<span class="list-paths__item__arrow"></span>
react-router@4.3.1
<span class="list-paths__item__arrow"></span>
path-to-regexp@1.8.0
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="overview">Overview</h2>
<p>Affected versions of this package are vulnerable to Regular Expression Denial of Service (ReDoS) when multiple regular expression parameters are used in a single segment and are separated by a character other than a <code>/</code> or <code>.</code>, which produces a regular expression such as <code>/^\/([^\/]+?)-([^\/]+?)\/?$/</code>. The resulting poor performance blocks the event loop and can lead to a DoS.</p>
<p><strong>Note:</strong>
Version 0.1.10 is patched to mitigate this but is also vulnerable if custom regular expressions are used. Due to the existence of this attack vector, the Snyk security team have decided to err on the side of caution in considering the very widely-used v0 branch vulnerable, while the 8.0.0 release has completely eliminated the vulnerable functionality.</p>
<h2 id="workaround">Workaround</h2>
<p>This vulnerability can be avoided by using a custom regular expression for parameters after the first in a segment, which excludes <code>-</code> and <code>/</code>.</p>
<h2 id="poc">PoC</h2>
<pre><code class="language-js">/a${&#39;-a&#39;.repeat(8_000)}/a
</code></pre>
<h2 id="details">Details</h2>
<p>Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its original and legitimate users. There are many types of DoS attacks, ranging from trying to clog the network pipes to the system by generating a large volume of traffic from many machines (a Distributed Denial of Service - DDoS - attack) to sending crafted requests that cause a system to crash or take a disproportionate amount of time to process.</p>
<p>The Regular expression Denial of Service (ReDoS) is a type of Denial of Service attack. Regular expressions are incredibly powerful, but they aren&#39;t very intuitive and can ultimately end up making it easy for attackers to take your site down.</p>
<p>Let&#39;s take the following regular expression as an example:</p>
<pre><code class="language-js">regex = /A(B|C+)+D/
</code></pre>
<p>This regular expression accomplishes the following:</p>
<ul>
<li><code>A</code> The string must start with the letter &#39;A&#39;</li>
<li><code>(B|C+)+</code> The string must then follow the letter A with either the letter &#39;B&#39; or some number of occurrences of the letter &#39;C&#39; (the <code>+</code> matches one or more times). The <code>+</code> at the end of this section states that we can look for one or more matches of this section.</li>
<li><code>D</code> Finally, we ensure this section of the string ends with a &#39;D&#39;</li>
</ul>
<p>The expression would match inputs such as <code>ABBD</code>, <code>ABCCCCD</code>, <code>ABCBCCCD</code> and <code>ACCCCCD</code></p>
<p>In most cases, it doesn&#39;t take very long for a regex engine to find a match:</p>
<pre><code class="language-bash">$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCD&quot;)&#39;
0.04s user 0.01s system 95% cpu 0.052 total
$ time node -e &#39;/A(B|C+)+D/.test(&quot;ACCCCCCCCCCCCCCCCCCCCCCCCCCCCX&quot;)&#39;
1.79s user 0.02s system 99% cpu 1.812 total
</code></pre>
<p>The entire process of testing it against a 30-character string takes around 52ms. But when given an invalid string, it takes nearly two seconds to complete the test, more than thirty times as long as it took to test a valid string. The dramatic difference is due to the way regular expressions get evaluated.</p>
<p>Most regex engines work very similarly (with minor differences). The engine will match the first possible way to accept the current character and proceed to the next one. If it then fails to match the next one, it will backtrack and see if there was another way to digest the previous character. If it goes too far down the rabbit hole only to find out the string doesn&#39;t match in the end, and if many characters have multiple valid regex paths, the number of backtracking steps can become very large, resulting in what is known as <em>catastrophic backtracking</em>.</p>
<p>Let&#39;s look at how our expression runs into this problem, using a shorter string: &quot;ACCCX&quot;. While it seems fairly straightforward, there are still four different ways that the engine could match those three C&#39;s:</p>
<ol>
<li>CCC</li>
<li>CC+C</li>
<li>C+CC</li>
<li>C+C+C.</li>
</ol>
<p>The engine has to try each of those combinations to see if any of them potentially match against the expression. When you combine that with the other steps the engine must take, we can use the <a href="https://regex101.com/debugger">RegEx 101 debugger</a> to see that the engine has to take a total of 38 steps before it can determine the string doesn&#39;t match.</p>
<p>From there, the number of steps the engine must use to validate a string just continues to grow.</p>
<table>
<thead>
<tr>
<th>String</th>
<th align="right">Number of C&#39;s</th>
<th align="right">Number of steps</th>
</tr>
</thead>
<tbody><tr>
<td>ACCCX</td>
<td align="right">3</td>
<td align="right">38</td>
</tr>
<tr>
<td>ACCCCX</td>
<td align="right">4</td>
<td align="right">71</td>
</tr>
<tr>
<td>ACCCCCX</td>
<td align="right">5</td>
<td align="right">136</td>
</tr>
<tr>
<td>ACCCCCCCCCCCCCCX</td>
<td align="right">14</td>
<td align="right">65,553</td>
</tr>
</tbody></table>
<p>By the time the string includes 14 C&#39;s, the engine has to take over 65,000 steps just to see if the string is valid. These extreme situations can cause the engine to work very slowly (exponentially related to input size, as shown above), allowing an attacker to exploit this behavior and cause the service to consume excessive CPU, resulting in a Denial of Service.</p>
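<p>The growth shown in the table can also be observed as wall-clock time with a short script (the input lengths are arbitrary; exact timings depend on the machine and the regex engine):</p>
<pre><code class="language-js">// Sketch: time the A(B|C+)+D example against invalid inputs of growing length.
const re = /A(B|C+)+D/;

for (const n of [18, 20, 22, 24, 26]) {
  const input = &#39;A&#39; + &#39;C&#39;.repeat(n) + &#39;X&#39;; // the trailing X forces every split to be tried
  const start = process.hrtime.bigint();
  re.test(input);
  const elapsedMs = Number(process.hrtime.bigint() - start) / 1e6;
  console.log(&#39;n=&#39; + n + &#39;: &#39; + elapsedMs.toFixed(1) + &#39; ms&#39;);
}
</code></pre>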
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>path-to-regexp</code> to version 8.0.0 or higher.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/29b96b4a1de52824e1ca0f49a701183cc4ed476f">GitHub Commit</a></li>
<li><a href="https://github.com/pillarjs/path-to-regexp/commit/60f2121e9b66b7b622cc01080df0aabda9eedee6">GitHub Commit</a></li>
<li><a href="https://blakeembrey.com/posts/2024-09-web-redos/">Vulnerability Write-up</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-JS-PATHTOREGEXP-7925106">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">Denial of Service (DoS)</h2>
<div class="card__section">

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:21:35 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:21:34 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:21:38 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:21:39 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following path:</span>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:21:42 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:21:44 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -7,7 +7,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Snyk test report</title>
<meta name="description" content="17 known vulnerabilities found in 81 vulnerable dependency paths.">
<meta name="description" content="16 known vulnerabilities found in 80 vulnerable dependency paths.">
<base target="_blank">
<link rel="icon" type="image/png" href="https://res.cloudinary.com/snyk/image/upload/v1468845142/favicon/favicon.png"
sizes="194x194">
@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:21:58 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:22:02 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>
@@ -470,8 +470,8 @@
</div>
<div class="meta-counts">
<div class="meta-count"><span>17</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>81 vulnerable dependency paths</span></div>
<div class="meta-count"><span>16</span> <span>known vulnerabilities</span></div>
<div class="meta-count"><span>80 vulnerable dependency paths</span></div>
<div class="meta-count"><span>2292</span> <span>dependencies</span></div>
</div><!-- .meta-counts -->
</div><!-- .layout-container--short -->
@@ -1435,7 +1435,7 @@
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>An issue was discovered in libexpat before 2.6.3. dtdCopy in xmlparse.c can have an integer overflow for nDefaultAtts on 32-bit platforms (where UINT_MAX equals SIZE_MAX).</p>
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>Ubuntu:24.04</code> <code>expat</code> to version 2.6.1-2ubuntu0.1 or higher.</p>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>expat</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45491">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45491</a></li>
@@ -1508,7 +1508,7 @@
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>An issue was discovered in libexpat before 2.6.3. xmlparse.c does not reject a negative length for XML_ParseBuffer.</p>
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>Ubuntu:24.04</code> <code>expat</code> to version 2.6.1-2ubuntu0.1 or higher.</p>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>expat</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45490">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45490</a></li>
@@ -1581,7 +1581,7 @@
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>An issue was discovered in libexpat before 2.6.3. nextScaffoldPart in xmlparse.c can have an integer overflow for m_groupSize on 32-bit platforms (where UINT_MAX equals SIZE_MAX).</p>
<h2 id="remediation">Remediation</h2>
<p>Upgrade <code>Ubuntu:24.04</code> <code>expat</code> to version 2.6.1-2ubuntu0.1 or higher.</p>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>expat</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45492">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-45492</a></li>
@@ -1595,80 +1595,6 @@
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2404-EXPAT-7885595">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--medium" data-snyk-test="medium">
<h2 class="card__title">CVE-2024-8096</h2>
<div class="card__section">
<div class="label label--medium">
<span class="label__text">medium severity</span>
</div>
<hr/>
<ul class="card__meta">
<li class="card__meta__item">
Manifest file: quay.io/argoproj/argocd:v2.12.3/argoproj/argocd <span class="list-paths__item__arrow"></span> Dockerfile
</li>
<li class="card__meta__item">
Package Manager: ubuntu:24.04
</li>
<li class="card__meta__item">
Vulnerable module:
curl/libcurl3t64-gnutls
</li>
<li class="card__meta__item">Introduced through:
docker-image|quay.io/argoproj/argocd@v2.12.3, git@1:2.43.0-1ubuntu7.1 and others
</li>
</ul>
<hr/>
<h3 class="card__section__title">Detailed paths</h3>
<ul class="card__meta__paths">
<li>
<span class="list-paths__item__introduced"><em>Introduced through</em>:
docker-image|quay.io/argoproj/argocd@v2.12.3
<span class="list-paths__item__arrow"></span>
git@1:2.43.0-1ubuntu7.1
<span class="list-paths__item__arrow"></span>
curl/libcurl3t64-gnutls@8.5.0-2ubuntu10.3
</span>
</li>
</ul><!-- .list-paths -->
</div><!-- .card__section -->
<hr/>
<!-- Overview -->
<h2 id="nvd-description">NVD Description</h2>
<p><strong><em>Note:</em></strong> <em>Versions mentioned in the description apply only to the upstream <code>curl</code> package and not the <code>curl</code> package as distributed by <code>Ubuntu</code>.</em>
<em>See <code>How to fix?</code> for <code>Ubuntu:24.04</code> relevant fixed versions and status.</em></p>
<p>When curl is told to use the Certificate Status Request TLS extension, often referred to as OCSP stapling, to verify that the server certificate is valid, it might fail to detect some OCSP problems and instead wrongly consider the response as fine. If the returned status reports an error other than &#39;revoked&#39; (for example, &#39;unauthorized&#39;), it is not treated as a bad certificate.</p>
<h2 id="remediation">Remediation</h2>
<p>There is no fixed version for <code>Ubuntu:24.04</code> <code>curl</code>.</p>
<h2 id="references">References</h2>
<ul>
<li><a href="http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-8096">http://people.ubuntu.com/~ubuntu-security/cve/CVE-2024-8096</a></li>
<li><a href="https://curl.se/docs/CVE-2024-8096.html">https://curl.se/docs/CVE-2024-8096.html</a></li>
<li><a href="https://curl.se/docs/CVE-2024-8096.json">https://curl.se/docs/CVE-2024-8096.json</a></li>
<li><a href="https://hackerone.com/reports/2669852">https://hackerone.com/reports/2669852</a></li>
</ul>
<hr/>
<div class="cta card__cta">
<p><a href="https://snyk.io/vuln/SNYK-UBUNTU2404-CURL-7931919">More about this vulnerability</a></p>
</div>
</div><!-- .card -->
<div class="card card--vuln disclosure--not-new severity--low" data-snyk-test="low">
<h2 class="card__title">Release of Invalid Pointer or Reference</h2>

View File

@@ -456,7 +456,7 @@
<div class="header-wrap">
<h1 class="project__header__title">Snyk test report</h1>
<p class="timestamp">September 15th 2024, 12:22:01 am (UTC+00:00)</p>
<p class="timestamp">September 8th 2024, 12:22:06 am (UTC+00:00)</p>
</div>
<div class="source-panel">
<span>Scanned the following paths:</span>

View File

@@ -8,7 +8,6 @@
| `ARGOCD_APP_NAMESPACE` | The destination namespace of the application. |
| `ARGOCD_APP_REVISION` | The resolved revision, e.g. `f913b6cbf58aa5ae5ca1f8a2b149477aebcbd9d8`. |
| `ARGOCD_APP_REVISION_SHORT` | The resolved short revision, e.g. `f913b6c`. |
| `ARGOCD_APP_REVISION_SHORT_8` | The resolved short revision with length 8, e.g. `f913b6cb`. |
| `ARGOCD_APP_SOURCE_PATH` | The path of the app within the source repo. |
| `ARGOCD_APP_SOURCE_REPO_URL` | The source repo URL. |
| `ARGOCD_APP_SOURCE_TARGET_REVISION` | The target revision from the spec, e.g. `master`. |

View File

@@ -12,7 +12,7 @@ argocd [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -53,7 +53,7 @@ argocd account [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -26,7 +26,7 @@ argocd account bcrypt --password YOUR_PASSWORD
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -36,7 +36,7 @@ Resources: [clusters projects applications applicationsets repositories certific
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -29,7 +29,7 @@ argocd account delete-token --account <account-name> ID
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -31,7 +31,7 @@ argocd account generate-token --account <account-name>
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -29,7 +29,7 @@ argocd account get-user-info [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -30,7 +30,7 @@ argocd account get --account <account-name>
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -25,7 +25,7 @@ argocd account list
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -41,7 +41,7 @@ argocd account update-password [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -31,7 +31,7 @@ $ argocd admin initial-password reset
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -33,7 +33,7 @@ argocd admin app get-reconcile-results APPNAME
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -18,7 +18,7 @@ argocd admin app diff-reconcile-results PATH1 PATH2 [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -107,7 +107,7 @@ argocd admin app generate-spec APPNAME [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -44,7 +44,7 @@ argocd admin app get-reconcile-results PATH [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -32,7 +32,7 @@ argocd admin cluster namespaces my-cluster
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -41,7 +41,7 @@ argocd admin cluster generate-spec CONTEXT [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -55,7 +55,7 @@ argocd admin cluster kubeconfig https://cluster-api-url:6443 /path/to/output/kub
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -38,7 +38,7 @@ argocd admin cluster namespaces [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -39,7 +39,7 @@ argocd admin cluster namespaces disable-namespaced-mode PATTERN [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -41,7 +41,7 @@ argocd admin cluster namespaces enable-namespaced-mode PATTERN [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -54,7 +54,7 @@ argocd admin cluster shards [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -68,7 +68,7 @@ argocd admin cluster stats target-cluster
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -55,7 +55,7 @@ $ argocd admin dashboard --redis-compress gzip
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

View File

@@ -41,7 +41,7 @@ argocd admin export [flags]
```
--argocd-context string The name of the Argo-CD server context to use
--auth-token string Authentication token; set this or the ARGOCD_AUTH_TOKEN environment variable
--auth-token string Authentication token
--client-crt string Client certificate file
--client-crt-key string Client certificate key file
--config string Path to Argo CD config (default "/home/user/.config/argocd/config")

Some files were not shown because too many files have changed in this diff.