mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-02-20 01:28:45 +01:00
Compare commits
49 Commits
renovate/d
...
326a1dbd6b
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
326a1dbd6b | ||
|
|
d0b2a6cfd7 | ||
|
|
e464f6ae43 | ||
|
|
4b0a2c0ef2 | ||
|
|
8449d9a0f3 | ||
|
|
92df21cfc0 | ||
|
|
24493145a6 | ||
|
|
273683b647 | ||
|
|
a1d18559f5 | ||
|
|
8df5e96981 | ||
|
|
c4f0cd3e84 | ||
|
|
0038fce14d | ||
|
|
6f270cc8f4 | ||
|
|
61267982ab | ||
|
|
445916fdb0 | ||
|
|
54f29167a6 | ||
|
|
55d0d09802 | ||
|
|
2502af402d | ||
|
|
fd6b7d5b3c | ||
|
|
f4e479e3f0 | ||
|
|
436da4e7d8 | ||
|
|
cd6a9aaf3f | ||
|
|
ac071b57a1 | ||
|
|
3d64c21206 | ||
|
|
0fa47b11b2 | ||
|
|
48a9dcc23b | ||
|
|
b52a0750b2 | ||
|
|
8fbb44c336 | ||
|
|
28e8472c69 | ||
|
|
a6472c8393 | ||
|
|
74de77a24c | ||
|
|
15568cb9d5 | ||
|
|
32c32a67cb | ||
|
|
675f8cfe3f | ||
|
|
9ae26e4e74 | ||
|
|
369fb7577e | ||
|
|
efca5b9144 | ||
|
|
2c3bc6f991 | ||
|
|
8639b7be5e | ||
|
|
5de1e6472d | ||
|
|
51b595b1ee | ||
|
|
fe0466de51 | ||
|
|
05b416906e | ||
|
|
20604f1b21 | ||
|
|
fd2d0adae9 | ||
|
|
708c63683c | ||
|
|
393cb97042 | ||
|
|
99434863c9 | ||
|
|
814db444c3 |
19
.github/workflows/ci-build.yaml
vendored
19
.github/workflows/ci-build.yaml
vendored
@@ -14,7 +14,7 @@ on:
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.3'
|
||||
GOLANG_VERSION: '1.25.5'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -194,7 +194,7 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
- name: Create symlink in GOPATH
|
||||
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
|
||||
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||
with:
|
||||
@@ -271,13 +271,13 @@ jobs:
|
||||
# We need to vendor go modules for codegen yet
|
||||
go mod download
|
||||
go mod vendor -v
|
||||
# generalizing repo name for forks: ${{ github.event.repository.name }}
|
||||
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
|
||||
# generalizing repo name for forks: ${{ github.event.repository.name }}
|
||||
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
|
||||
- name: Install toolchain for codegen
|
||||
run: |
|
||||
make install-codegen-tools-local
|
||||
make install-go-tools-local
|
||||
# generalizing repo name for forks: ${{ github.event.repository.name }}
|
||||
# generalizing repo name for forks: ${{ github.event.repository.name }}
|
||||
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
|
||||
# We install kustomize in the dist directory
|
||||
- name: Add dist to PATH
|
||||
@@ -418,22 +418,19 @@ jobs:
|
||||
# latest: true means that this version mush upload the coverage report to codecov.io
|
||||
# We designate the latest version because we only collect code coverage for that version.
|
||||
k3s:
|
||||
- version: v1.33.1
|
||||
- version: v1.34.2
|
||||
latest: true
|
||||
- version: v1.33.1
|
||||
latest: false
|
||||
- version: v1.32.1
|
||||
latest: false
|
||||
- version: v1.31.0
|
||||
latest: false
|
||||
- version: v1.30.4
|
||||
latest: false
|
||||
needs:
|
||||
- build-go
|
||||
- changes
|
||||
env:
|
||||
ARGOCD_FAKE_IN_CLUSTER: 'true'
|
||||
ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
|
||||
ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
|
||||
ARGOCD_E2E_SSH_KNOWN_HOSTS: '../fixture/certs/ssh_known_hosts'
|
||||
ARGOCD_E2E_K3S: 'true'
|
||||
ARGOCD_IN_CI: 'true'
|
||||
ARGOCD_E2E_APISERVER_PORT: '8088'
|
||||
|
||||
4
.github/workflows/image.yaml
vendored
4
.github/workflows/image.yaml
vendored
@@ -86,7 +86,7 @@ jobs:
|
||||
with:
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.3
|
||||
go-version: 1.25.5
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: false
|
||||
|
||||
@@ -103,7 +103,7 @@ jobs:
|
||||
ghcr_image_name: ${{ needs.set-vars.outputs.ghcr_image_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.3
|
||||
go-version: 1.25.5
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
4
.github/workflows/release.yaml
vendored
4
.github/workflows/release.yaml
vendored
@@ -11,7 +11,7 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.3' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.25.5' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
quay_image_name: ${{ needs.setup-variables.outputs.quay_image_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.3
|
||||
go-version: 1.25.5
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
2
.github/workflows/renovate.yaml
vendored
2
.github/workflows/renovate.yaml
vendored
@@ -27,7 +27,7 @@ jobs:
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||
with:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.3
|
||||
go-version: 1.25.5
|
||||
|
||||
- name: Self-hosted Renovate
|
||||
uses: renovatebot/github-action@5712c6a41dea6cdf32c72d92a763bd417e6606aa #44.0.5
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:27771fb7b40a58237c98e8d3e6b9ecdd9289cec69a857fccfb85ff36294dac20
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.10@sha256:5922638447b1e3ba114332c896a2c7288c876bb94adec923d70d58a17d2fec5e
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.25.3@sha256:6d4e5e74f47db00f7f24da5f53c1b4198ae46862a47395e30477365458347bf2 AS builder
|
||||
FROM docker.io/library/golang:1.25.5@sha256:31c1e53dfc1cc2d269deec9c83f58729fa3c53dc9a576f6426109d1e319e9e9a AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.3@sha256:6d4e5e74f47db00f7f24da5f53c1b4198ae46862a47395e30477365458347bf2 AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.5@sha256:31c1e53dfc1cc2d269deec9c83f58729fa3c53dc9a576f6426109d1e319e9e9a AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM docker.io/library/golang:1.25.3@sha256:6d4e5e74f47db00f7f24da5f53c1b4198ae46862a47395e30477365458347bf2
|
||||
FROM docker.io/library/golang:1.25.5@sha256:31c1e53dfc1cc2d269deec9c83f58729fa3c53dc9a576f6426109d1e319e9e9a
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
28
Makefile
28
Makefile
@@ -76,8 +76,10 @@ ARGOCD_E2E_REDIS_PORT?=6379
|
||||
ARGOCD_E2E_DEX_PORT?=5556
|
||||
ARGOCD_E2E_YARN_HOST?=localhost
|
||||
ARGOCD_E2E_DISABLE_AUTH?=
|
||||
ARGOCD_E2E_DIR?=/tmp/argo-e2e
|
||||
|
||||
ARGOCD_E2E_TEST_TIMEOUT?=90m
|
||||
ARGOCD_E2E_RERUN_FAILS?=5
|
||||
|
||||
ARGOCD_IN_CI?=false
|
||||
ARGOCD_TEST_E2E?=true
|
||||
@@ -461,7 +463,7 @@ test-e2e:
|
||||
test-e2e-local: cli-local
|
||||
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
|
||||
export GO111MODULE=off
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
|
||||
|
||||
# Spawns a shell in the test server container for debugging purposes
|
||||
debug-test-server: test-tools-image
|
||||
@@ -485,13 +487,13 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
kubectl create ns argocd-e2e-external || true
|
||||
kubectl create ns argocd-e2e-external-2 || true
|
||||
kubectl config set-context --current --namespace=argocd-e2e
|
||||
kustomize build test/manifests/base | kubectl apply --server-side -f -
|
||||
kustomize build test/manifests/base | kubectl apply --server-side --force-conflicts -f -
|
||||
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
|
||||
# Create GPG keys and source directories
|
||||
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
|
||||
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
|
||||
if test -d $(ARGOCD_E2E_DIR)/app/config/gpg; then rm -rf $(ARGOCD_E2E_DIR)/app/config/gpg/*; fi
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/keys && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/keys
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/source && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/source
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/plugin && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/plugin
|
||||
# create folders to hold go coverage results for each component
|
||||
mkdir -p /tmp/coverage/app-controller
|
||||
mkdir -p /tmp/coverage/api-server
|
||||
@@ -500,13 +502,15 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
mkdir -p /tmp/coverage/notification
|
||||
mkdir -p /tmp/coverage/commit-server
|
||||
# set paths for locally managed ssh known hosts and tls certs data
|
||||
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
|
||||
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
|
||||
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
|
||||
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
|
||||
ARGOCD_E2E_DIR=$(ARGOCD_E2E_DIR) \
|
||||
ARGOCD_SSH_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/ssh \
|
||||
ARGOCD_TLS_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/tls \
|
||||
ARGOCD_GPG_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/gpg/source \
|
||||
ARGOCD_GNUPGHOME=$(ARGOCD_E2E_DIR)/app/config/gpg/keys \
|
||||
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
|
||||
ARGOCD_GIT_CONFIG=$(PWD)/test/e2e/fixture/gitconfig \
|
||||
ARGOCD_E2E_DISABLE_AUTH=false \
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
|
||||
4
Procfile
4
Procfile
@@ -2,7 +2,7 @@ controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run
|
||||
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v3/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
|
||||
redis: hack/start-redis-with-password.sh
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=./dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
@@ -11,4 +11,4 @@ helm-registry: test/fixture/testrepos/start-helm-registry.sh
|
||||
oci-registry: test/fixture/testrepos/start-authenticated-helm-registry.sh
|
||||
dev-mounter: [ "$ARGOCD_E2E_TEST" != "true" ] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
|
||||
|
||||
@@ -669,8 +669,9 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
Watches(
|
||||
&corev1.Secret{},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: r.ApplicationSetNamespaces,
|
||||
}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -22,8 +23,9 @@ import (
|
||||
// requeue any related ApplicationSets.
|
||||
type clusterSecretEventHandler struct {
|
||||
// handler.EnqueueRequestForOwner
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
ApplicationSetNamespaces []string
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
@@ -68,6 +70,10 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Contex
|
||||
|
||||
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
|
||||
for _, appSet := range appSetList.Items {
|
||||
if !utils.IsNamespaceAllowed(h.ApplicationSetNamespaces, appSet.GetNamespace()) {
|
||||
// Ignore it as not part of the allowed list of namespaces in which to watch Appsets
|
||||
continue
|
||||
}
|
||||
foundClusterGenerator := false
|
||||
for _, generator := range appSet.Spec.Generators {
|
||||
if generator.Clusters != nil {
|
||||
|
||||
@@ -137,7 +137,7 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
@@ -171,9 +171,37 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cluster generators in other namespaces should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "my-namespace-not-allowed",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
@@ -552,8 +580,9 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
|
||||
|
||||
handler := &clusterSecretEventHandler{
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: []string{"argocd"},
|
||||
}
|
||||
|
||||
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
|
||||
|
||||
209
commitserver/commit/addnote_race_test.go
Normal file
209
commitserver/commit/addnote_race_test.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package commit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/git"
|
||||
)
|
||||
|
||||
// TestAddNoteConcurrentStaggered tests that when multiple AddNote operations run
|
||||
// with slightly staggered timing, all notes persist correctly.
|
||||
// Each operation gets its own git clone, simulating multiple concurrent hydration requests.
|
||||
func TestAddNoteConcurrentStaggered(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
remotePath, localPath := setupRepoWithRemote(t)
|
||||
|
||||
// Create 3 branches with commits (simulating different hydration targets)
|
||||
branches := []string{"env/dev", "env/staging", "env/prod"}
|
||||
commitSHAs := make([]string, 3)
|
||||
|
||||
for i, branch := range branches {
|
||||
commitSHAs[i] = commitAndPushBranch(t, localPath, branch)
|
||||
}
|
||||
|
||||
// Create separate clones for concurrent operations
|
||||
cloneClients := make([]git.Client, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
cloneClients[i] = getClientForClone(t, remotePath)
|
||||
}
|
||||
|
||||
// Add notes concurrently with slight stagger
|
||||
var wg sync.WaitGroup
|
||||
errors := make([]error, 3)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
time.Sleep(time.Duration(idx*50) * time.Millisecond)
|
||||
errors[idx] = AddNote(cloneClients[idx], fmt.Sprintf("dry-sha-%d", idx), commitSHAs[idx])
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Verify all notes persisted
|
||||
verifyClient := getClientForClone(t, remotePath)
|
||||
|
||||
for i, commitSHA := range commitSHAs {
|
||||
note, err := verifyClient.GetCommitNote(commitSHA, NoteNamespace)
|
||||
require.NoError(t, err, "Note should exist for commit %d", i)
|
||||
assert.Contains(t, note, fmt.Sprintf("dry-sha-%d", i))
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddNoteConcurrentSimultaneous tests that when multiple AddNote operations run
|
||||
// simultaneously (without delays), all notes persist correctly.
|
||||
// Each operation gets its own git clone, simulating multiple concurrent hydration requests.
|
||||
func TestAddNoteConcurrentSimultaneous(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
remotePath, localPath := setupRepoWithRemote(t)
|
||||
|
||||
// Create 3 branches with commits (simulating different hydration targets)
|
||||
branches := []string{"env/dev", "env/staging", "env/prod"}
|
||||
commitSHAs := make([]string, 3)
|
||||
|
||||
for i, branch := range branches {
|
||||
commitSHAs[i] = commitAndPushBranch(t, localPath, branch)
|
||||
}
|
||||
|
||||
// Create separate clones for concurrent operations
|
||||
cloneClients := make([]git.Client, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
cloneClients[i] = getClientForClone(t, remotePath)
|
||||
}
|
||||
|
||||
// Add notes concurrently without delays
|
||||
var wg sync.WaitGroup
|
||||
startChan := make(chan struct{})
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
<-startChan
|
||||
_ = AddNote(cloneClients[idx], fmt.Sprintf("dry-sha-%d", idx), commitSHAs[idx])
|
||||
}(i)
|
||||
}
|
||||
|
||||
close(startChan)
|
||||
wg.Wait()
|
||||
|
||||
// Verify all notes persisted
|
||||
verifyClient := getClientForClone(t, remotePath)
|
||||
|
||||
for i, commitSHA := range commitSHAs {
|
||||
note, err := verifyClient.GetCommitNote(commitSHA, NoteNamespace)
|
||||
require.NoError(t, err, "Note should exist for commit %d", i)
|
||||
assert.Contains(t, note, fmt.Sprintf("dry-sha-%d", i))
|
||||
}
|
||||
}
|
||||
|
||||
// setupRepoWithRemote creates a bare remote repo and a local repo configured to push to it.
|
||||
// Returns the remote path and local path.
|
||||
func setupRepoWithRemote(t *testing.T) (remotePath, localPath string) {
|
||||
t.Helper()
|
||||
ctx := t.Context()
|
||||
|
||||
// Create bare remote repository
|
||||
remoteDir := t.TempDir()
|
||||
remotePath = filepath.Join(remoteDir, "remote.git")
|
||||
err := os.MkdirAll(remotePath, 0o755)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, remotePath, "init", "--bare")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create local repository
|
||||
localDir := t.TempDir()
|
||||
localPath = filepath.Join(localDir, "local")
|
||||
err = os.MkdirAll(localPath, 0o755)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "init")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "config", "user.name", "Test User")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "config", "user.email", "test@example.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "remote", "add", "origin", remotePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
return remotePath, localPath
|
||||
}
|
||||
|
||||
// commitAndPushBranch writes a file, commits it, creates a branch, and pushes to remote.
|
||||
// Returns the commit SHA.
|
||||
func commitAndPushBranch(t *testing.T, localPath, branch string) string {
|
||||
t.Helper()
|
||||
ctx := t.Context()
|
||||
|
||||
testFile := filepath.Join(localPath, "test.txt")
|
||||
err := os.WriteFile(testFile, []byte("content for "+branch), 0o644)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "add", ".")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "commit", "-m", "commit "+branch)
|
||||
require.NoError(t, err)
|
||||
|
||||
sha, err := runGitCmd(ctx, localPath, "rev-parse", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "branch", branch)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, localPath, "push", "origin", branch)
|
||||
require.NoError(t, err)
|
||||
|
||||
return sha
|
||||
}
|
||||
|
||||
// getClientForClone creates a git client with a fresh clone of the remote repo.
|
||||
func getClientForClone(t *testing.T, remotePath string) git.Client {
|
||||
t.Helper()
|
||||
ctx := t.Context()
|
||||
|
||||
workDir := t.TempDir()
|
||||
|
||||
client, err := git.NewClientExt(remotePath, workDir, &git.NopCreds{}, false, false, "", "")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.Init()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, workDir, "config", "user.name", "Test User")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = runGitCmd(ctx, workDir, "config", "user.email", "test@example.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.Fetch("", 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// runGitCmd is a helper function to run git commands
|
||||
func runGitCmd(ctx context.Context, dir string, args ...string) (string, error) {
|
||||
cmd := exec.CommandContext(ctx, "git", args...)
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.CombinedOutput()
|
||||
return strings.TrimSpace(string(output)), err
|
||||
}
|
||||
@@ -187,7 +187,7 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
|
||||
// short-circuit if already hydrated
|
||||
if isHydrated {
|
||||
logCtx.Debugf("this dry sha %s is already hydrated", r.DrySha)
|
||||
return "", "", nil
|
||||
return "", hydratedSha, nil
|
||||
}
|
||||
|
||||
logCtx.Debug("Writing manifests")
|
||||
@@ -197,13 +197,14 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
|
||||
return "", "", fmt.Errorf("failed to write manifests: %w", err)
|
||||
}
|
||||
if !shouldCommit {
|
||||
// add the note and return
|
||||
// Manifests did not change, so we don't need to create a new commit.
|
||||
// Add a git note to track that this dry SHA has been processed, and return the existing hydrated SHA.
|
||||
logCtx.Debug("Adding commit note")
|
||||
err = AddNote(gitClient, r.DrySha, hydratedSha)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to add commit note: %w", err)
|
||||
}
|
||||
return "", "", nil
|
||||
return "", hydratedSha, nil
|
||||
}
|
||||
logCtx.Debug("Committing and pushing changes")
|
||||
out, err = gitClient.CommitAndPush(r.TargetBranch, r.CommitMessage)
|
||||
|
||||
@@ -108,7 +108,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), validRequest)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha) // changes introduced by commit note. hydration won't happen if there are no new manifest|s to commit
|
||||
assert.Equal(t, "it-worked!", resp.HydratedSha, "Should return existing hydrated SHA for no-op")
|
||||
})
|
||||
|
||||
t.Run("root path with dot and blank - no directory removal", func(t *testing.T) {
|
||||
@@ -283,12 +283,13 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
DrySha: "dry-sha-456",
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithEmptyPaths)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha) // changes introduced by commit note. hydration won't happen if there are no new manifest|s to commit
|
||||
assert.Equal(t, "empty-paths-sha", resp.HydratedSha, "Should return existing hydrated SHA for no-op")
|
||||
})
|
||||
|
||||
t.Run("duplicate request already hydrated", func(t *testing.T) {
|
||||
@@ -329,7 +330,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), request)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha) // changes introduced by commit note. hydration won't happen if there are no new manifest|s to commit
|
||||
assert.Equal(t, "dupe-test-sha", resp.HydratedSha, "Should return existing hydrated SHA when already hydrated")
|
||||
})
|
||||
|
||||
t.Run("root path with dot - no changes to manifest - should commit note only", func(t *testing.T) {
|
||||
@@ -355,6 +356,7 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
DrySha: "dry-sha-123",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: ".",
|
||||
@@ -370,7 +372,8 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithRootAndBlank)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Empty(t, resp.HydratedSha)
|
||||
// BUG FIX: When manifests don't change (no-op), the existing hydrated SHA should be returned.
|
||||
assert.Equal(t, "root-and-blank-sha", resp.HydratedSha, "Should return existing hydrated SHA for no-op")
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -23,8 +23,8 @@ import (
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
const gitAttributesContents = `*/README.md linguist-generated=true
|
||||
*/hydrator.metadata linguist-generated=true`
|
||||
const gitAttributesContents = `**/README.md linguist-generated=true
|
||||
**/hydrator.metadata linguist-generated=true`
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
|
||||
@@ -8,8 +8,10 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -352,8 +354,76 @@ func TestWriteGitAttributes(t *testing.T) {
|
||||
gitAttributesPath := filepath.Join(root.Name(), ".gitattributes")
|
||||
gitAttributesBytes, err := os.ReadFile(gitAttributesPath)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/hydrator.metadata linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "hydrator.metadata linguist-generated=true")
|
||||
}
|
||||
|
||||
func TestWriteGitAttributes_MatchesAllDepths(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
err := writeGitAttributes(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The gitattributes pattern needs to match files at all depths:
|
||||
// - hydrator.metadata (root level)
|
||||
// - path1/hydrator.metadata (one level deep)
|
||||
// - path1/nested/deep/hydrator.metadata (multiple levels deep)
|
||||
// Same for README.md files
|
||||
//
|
||||
// The pattern "**/hydrator.metadata" matches at any depth including root
|
||||
// The pattern "*/hydrator.metadata" only matches exactly one directory level deep
|
||||
|
||||
// Test actual Git behavior using git check-attr
|
||||
// Initialize a git repo
|
||||
ctx := t.Context()
|
||||
repoPath := root.Name()
|
||||
cmd := exec.CommandContext(ctx, "git", "init")
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "Failed to init git repo: %s", string(output))
|
||||
|
||||
// Test files at different depths
|
||||
testCases := []struct {
|
||||
path string
|
||||
shouldMatch bool
|
||||
description string
|
||||
}{
|
||||
{"hydrator.metadata", true, "root level hydrator.metadata"},
|
||||
{"README.md", true, "root level README.md"},
|
||||
{"path1/hydrator.metadata", true, "one level deep hydrator.metadata"},
|
||||
{"path1/README.md", true, "one level deep README.md"},
|
||||
{"path1/nested/hydrator.metadata", true, "two levels deep hydrator.metadata"},
|
||||
{"path1/nested/README.md", true, "two levels deep README.md"},
|
||||
{"path1/nested/deep/hydrator.metadata", true, "three levels deep hydrator.metadata"},
|
||||
{"path1/nested/deep/README.md", true, "three levels deep README.md"},
|
||||
{"manifest.yaml", false, "manifest.yaml should not match"},
|
||||
{"path1/manifest.yaml", false, "nested manifest.yaml should not match"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// Use git check-attr to verify if linguist-generated attribute is set
|
||||
cmd := exec.CommandContext(ctx, "git", "check-attr", "linguist-generated", tc.path)
|
||||
cmd.Dir = repoPath
|
||||
output, err := cmd.CombinedOutput()
|
||||
require.NoError(t, err, "Failed to run git check-attr: %s", string(output))
|
||||
|
||||
// Output format: <path>: <attribute>: <value>
|
||||
// Example: "hydrator.metadata: linguist-generated: true"
|
||||
outputStr := strings.TrimSpace(string(output))
|
||||
|
||||
if tc.shouldMatch {
|
||||
expectedOutput := tc.path + ": linguist-generated: true"
|
||||
assert.Equal(t, expectedOutput, outputStr,
|
||||
"File %s should have linguist-generated=true attribute", tc.path)
|
||||
} else {
|
||||
// Attribute should be unspecified
|
||||
expectedOutput := tc.path + ": linguist-generated: unspecified"
|
||||
assert.Equal(t, expectedOutput, outputStr,
|
||||
"File %s should not have linguist-generated=true attribute", tc.path)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsHydrated(t *testing.T) {
|
||||
@@ -401,3 +471,69 @@ func TestAddNote(t *testing.T) {
|
||||
err = AddNote(mockGitClient, drySha, commitShaErr)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// TestWriteForPaths_NoOpScenario tests that when manifests don't change between two hydrations,
|
||||
// shouldCommit returns false. This reproduces the bug where a new DRY commit that doesn't affect
|
||||
// manifests should not create a new hydrated commit.
|
||||
func TestWriteForPaths_NoOpScenario(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
repoURL := "https://github.com/example/repo"
|
||||
drySha1 := "abc123"
|
||||
drySha2 := "def456" // Different dry SHA
|
||||
paths := []*apiclient.PathDetails{
|
||||
{
|
||||
Path: "guestbook",
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{ManifestJSON: `{"apiVersion":"v1","kind":"Service","metadata":{"name":"guestbook-ui"}}`},
|
||||
{ManifestJSON: `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"guestbook-ui"}}`},
|
||||
},
|
||||
Commands: []string{"kustomize build ."},
|
||||
},
|
||||
}
|
||||
|
||||
now1 := metav1.NewTime(time.Now())
|
||||
metadata1 := &appsv1.RevisionMetadata{
|
||||
Author: "test-author",
|
||||
Date: &now1,
|
||||
Message: "Initial commit",
|
||||
}
|
||||
|
||||
// First hydration - manifests are new, so HasFileChanged should return true
|
||||
mockGitClient1 := gitmocks.NewClient(t)
|
||||
mockGitClient1.On("HasFileChanged", "guestbook/manifest.yaml").Return(true, nil).Once()
|
||||
|
||||
shouldCommit1, err := WriteForPaths(root, repoURL, drySha1, metadata1, paths, mockGitClient1)
|
||||
require.NoError(t, err)
|
||||
require.True(t, shouldCommit1, "First hydration should commit because manifests are new")
|
||||
|
||||
// Second hydration - same manifest content but different dry SHA and metadata
|
||||
// Simulate adding a README.md to the dry source (which doesn't affect manifests)
|
||||
now2 := metav1.NewTime(time.Now().Add(1 * time.Hour)) // Different timestamp
|
||||
metadata2 := &appsv1.RevisionMetadata{
|
||||
Author: "test-author",
|
||||
Date: &now2,
|
||||
Message: "Add README.md", // Different commit message
|
||||
}
|
||||
|
||||
// The manifests are identical, so HasFileChanged should return false
|
||||
mockGitClient2 := gitmocks.NewClient(t)
|
||||
mockGitClient2.On("HasFileChanged", "guestbook/manifest.yaml").Return(false, nil).Once()
|
||||
|
||||
shouldCommit2, err := WriteForPaths(root, repoURL, drySha2, metadata2, paths, mockGitClient2)
|
||||
require.NoError(t, err)
|
||||
require.False(t, shouldCommit2, "Second hydration should NOT commit because manifests didn't change")
|
||||
|
||||
// Verify that the root-level metadata WAS updated (even though we're not committing)
|
||||
// The files get written to the working directory, but since shouldCommit is false, they won't be committed
|
||||
topMetadataPath := filepath.Join(root.Name(), "hydrator.metadata")
|
||||
topMetadataBytes, err := os.ReadFile(topMetadataPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
var topMetadata hydratorMetadataFile
|
||||
err = json.Unmarshal(topMetadataBytes, &topMetadata)
|
||||
require.NoError(t, err)
|
||||
// The top-level metadata should have the NEW dry SHA (files are written, just not committed)
|
||||
assert.Equal(t, drySha2, topMetadata.DrySHA)
|
||||
assert.Equal(t, metadata2.Date.Format(time.RFC3339), topMetadata.Date)
|
||||
}
|
||||
|
||||
@@ -1137,13 +1137,13 @@ func (ctrl *ApplicationController) processProjectQueueItem() (processNext bool)
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) finalizeProjectDeletion(proj *appv1.AppProject) error {
|
||||
apps, err := ctrl.appLister.Applications(ctrl.namespace).List(labels.Everything())
|
||||
apps, err := ctrl.appLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing applications: %w", err)
|
||||
}
|
||||
appsCount := 0
|
||||
for i := range apps {
|
||||
if apps[i].Spec.GetProject() == proj.Name {
|
||||
if apps[i].Spec.GetProject() == proj.Name && ctrl.isAppNamespaceAllowed(apps[i]) && proj.IsAppNamespacePermitted(apps[i], ctrl.namespace) {
|
||||
appsCount++
|
||||
}
|
||||
}
|
||||
@@ -1559,8 +1559,18 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
// if we just completed an operation, force a refresh so that UI will report up-to-date
|
||||
// sync/health information
|
||||
if _, err := cache.MetaNamespaceKeyFunc(app); err == nil {
|
||||
// force app refresh with using CompareWithLatest comparison type and trigger app reconciliation loop
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatestForceResolve.Pointer(), nil)
|
||||
var compareWith CompareWith
|
||||
if state.Operation.InitiatedBy.Automated {
|
||||
// Do not force revision resolution on automated operations because
|
||||
// this would cause excessive Ls-Remote requests on monorepo commits
|
||||
compareWith = CompareWithLatest
|
||||
} else {
|
||||
// Force app refresh with using most recent resolved revision after sync,
|
||||
// so UI won't show a just synced application being out of sync if it was
|
||||
// synced after commit but before app. refresh (see #18153)
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
}
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), compareWith.Pointer(), nil)
|
||||
} else {
|
||||
logCtx.WithError(err).Warn("Fails to requeue application")
|
||||
}
|
||||
|
||||
@@ -2302,6 +2302,93 @@ func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
|
||||
}, receivedPatch)
|
||||
}
|
||||
|
||||
func TestFinalizeProjectDeletion_HasApplicationInOtherNamespace(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Namespace = "team-a"
|
||||
proj := &v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
SourceNamespaces: []string{"team-a"},
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(t.Context(), &fakeData{
|
||||
apps: []runtime.Object{app, proj},
|
||||
applicationNamespaces: []string{"team-a"},
|
||||
}, nil)
|
||||
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
patched := false
|
||||
fakeAppCs.PrependReactor("patch", "*", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
patched = true
|
||||
return true, &v1alpha1.AppProject{}, nil
|
||||
})
|
||||
|
||||
err := ctrl.finalizeProjectDeletion(proj)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, patched)
|
||||
}
|
||||
|
||||
func TestFinalizeProjectDeletion_IgnoresAppsInUnmonitoredNamespace(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Namespace = "team-b"
|
||||
proj := &v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace},
|
||||
}
|
||||
ctrl := newFakeController(t.Context(), &fakeData{
|
||||
apps: []runtime.Object{app, proj},
|
||||
applicationNamespaces: []string{"team-a"},
|
||||
}, nil)
|
||||
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
receivedPatch := map[string]any{}
|
||||
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.AppProject{}, nil
|
||||
})
|
||||
|
||||
err := ctrl.finalizeProjectDeletion(proj)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"finalizers": nil,
|
||||
},
|
||||
}, receivedPatch)
|
||||
}
|
||||
|
||||
func TestFinalizeProjectDeletion_IgnoresAppsNotPermittedByProject(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Namespace = "team-b"
|
||||
proj := &v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
SourceNamespaces: []string{"team-a"},
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(t.Context(), &fakeData{
|
||||
apps: []runtime.Object{app, proj},
|
||||
applicationNamespaces: []string{"team-a", "team-b"},
|
||||
}, nil)
|
||||
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
receivedPatch := map[string]any{}
|
||||
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.AppProject{}, nil
|
||||
})
|
||||
|
||||
err := ctrl.finalizeProjectDeletion(proj)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"finalizers": nil,
|
||||
},
|
||||
}, receivedPatch)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "default"
|
||||
@@ -2546,6 +2633,41 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
|
||||
assert.Equal(t, CompareWithLatestForceResolve, level)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppAutomatedOperation_Successful(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "default"
|
||||
app.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
InitiatedBy: v1alpha1.OperationInitiator{
|
||||
Automated: true,
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(t.Context(), &fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponses: []*apiclient.ManifestResponse{{
|
||||
Manifests: []string{},
|
||||
}},
|
||||
}, nil)
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
receivedPatch := map[string]any{}
|
||||
fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
|
||||
}
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
|
||||
assert.Equal(t, "successfully synced (no more tasks)", message)
|
||||
ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, CompareWithLatest, level)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -255,9 +255,6 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
|
||||
|
||||
appNamespace := app.Spec.Destination.Namespace
|
||||
apiVersions := argo.APIResourcesToStrings(apiResources, true)
|
||||
if !sendRuntimeState {
|
||||
appNamespace = ""
|
||||
}
|
||||
|
||||
updateRevisions := processManifestGeneratePathsEnabled &&
|
||||
// updating revisions result is not required if automated sync is not enabled
|
||||
@@ -273,7 +270,7 @@ func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Applica
|
||||
Revision: revision,
|
||||
SyncedRevision: syncedRevision,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
Paths: path.GetAppRefreshPaths(app),
|
||||
Paths: path.GetSourceRefreshPaths(app, source),
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: appNamespace,
|
||||
|
||||
@@ -3,29 +3,33 @@
|
||||
The test [directory](https://github.com/argoproj/argo-cd/tree/master/test) contains E2E tests and test applications. The tests assume that Argo CD services are installed into `argocd-e2e` namespace or cluster in current context. A throw-away
|
||||
namespace `argocd-e2e***` is created prior to the execution of the tests. The throw-away namespace is used as a target namespace for test applications.
|
||||
|
||||
The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution, the directory is copied into `/tmp/argo-e2e***` temp directory and used in tests as a
|
||||
The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution, the directory is copied into `/tmp/argo-e2e***` temp directory (configurable by `ARGOCD_E2E_DIR`) and used in tests as a
|
||||
Git repository via file url: `file:///tmp/argo-e2e***`.
|
||||
|
||||
> [!NOTE]
|
||||
> You might get an error such as `unable to ls-remote HEAD on repository: failed to list refs: repository not found` when querying the local repository exposed through the e2e server running in a container.
|
||||
> This is often caused by `/tmp` directoring sharing protection. You can configure a different directory with `ARGOCD_E2E_DIR`, or disable the directory sharing protection.
|
||||
>
|
||||
> **Rancher Desktop Volume Sharing**
|
||||
>
|
||||
> The e2e git server runs in a container. If you are using Rancher Desktop, you will need to enable volume sharing for
|
||||
> the e2e container to access the testdata directory. To do this, add the following to
|
||||
> To do enable `/tmp` sharing, add the following to
|
||||
> `~/Library/Application\ Support/rancher-desktop/lima/_config/override.yaml` and restart Rancher Desktop:
|
||||
>
|
||||
> ```yaml
|
||||
> mounts:
|
||||
> - location: /private/tmp
|
||||
> writable: true
|
||||
> - location: /private/tmp
|
||||
> writable: true
|
||||
> ```
|
||||
|
||||
## Running Tests Locally
|
||||
|
||||
### With virtualized chain
|
||||
|
||||
1. Start the e2e version `make start-e2e`
|
||||
2. Run the tests: `make test-e2e`
|
||||
|
||||
### With local chain
|
||||
|
||||
1. Start the e2e version `make start-e2e-local`
|
||||
2. Run the tests: `make test-e2e-local`
|
||||
|
||||
@@ -37,32 +41,32 @@ You can observe the tests by using the UI [http://localhost:4000/applications](h
|
||||
|
||||
The Makefile's `start-e2e` target starts instances of ArgoCD on your local machine, of which the most will require a network listener. If, for any reason, your machine already has network services listening on the same ports, then the e2e tests will not run. You can derive from the defaults by setting the following environment variables before you run `make start-e2e`:
|
||||
|
||||
* `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
|
||||
* `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)
|
||||
* `ARGOCD_E2E_DEX_PORT`: Listener port for `dex` (default: `5556`)
|
||||
* `ARGOCD_E2E_REDIS_PORT`: Listener port for `redis` (default: `6379`)
|
||||
* `ARGOCD_E2E_YARN_CMD`: Command to use for starting the UI via Yarn (default: `yarn`)
|
||||
- `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
|
||||
- `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)
|
||||
- `ARGOCD_E2E_DEX_PORT`: Listener port for `dex` (default: `5556`)
|
||||
- `ARGOCD_E2E_REDIS_PORT`: Listener port for `redis` (default: `6379`)
|
||||
- `ARGOCD_E2E_YARN_CMD`: Command to use for starting the UI via Yarn (default: `yarn`)
|
||||
- `ARGOCD_E2E_DIR`: Local path to the repository to use for ephemeral test data
|
||||
|
||||
If you have changed the port for `argocd-server`, be sure to also set `ARGOCD_SERVER` environment variable to point to that port, e.g. `export ARGOCD_SERVER=localhost:8888` before running `make test-e2e` so that the test will communicate to the correct server component.
|
||||
|
||||
|
||||
## Test Isolation
|
||||
|
||||
Some effort has been made to balance test isolation with speed. Tests are isolated as follows as each test gets:
|
||||
|
||||
* A random 5 character ID.
|
||||
* A unique Git repository containing the `testdata` in `/tmp/argo-e2e/${id}`.
|
||||
* A namespace `argocd-e2e-ns-${id}`.
|
||||
* A primary name for the app `argocd-e2e-${id}`.
|
||||
- A random 5 character ID.
|
||||
- A unique Git repository containing the `testdata` in `/tmp/argo-e2e/${id}`.
|
||||
- A namespace `argocd-e2e-ns-${id}`.
|
||||
- A primary name for the app `argocd-e2e-${id}`.
|
||||
|
||||
## Run only a subset of tests
|
||||
|
||||
Running all tests locally is a time-consuming process. To run only a subset of tests, you can set the `TEST_MODULE` environment variable.
|
||||
For example, to run only the OCI tests, you can set the variable as follows: `make TEST_MODULE=./test/e2e/oci_test.go test-e2e-local`
|
||||
Running all tests locally is a time-consuming process. To run only a subset of tests, you can set the `TEST_MODULE` environment variable.
|
||||
For example, to run only the OCI tests, you can set the variable as follows: `make TEST_MODULE=./test/e2e/oci_test.go test-e2e-local`
|
||||
|
||||
If you want to get a more fine-grained control over which tests to run, you can also try `make TEST_FLAGS="-run <TEST_METHOD_NAME_REGEXP>" test-e2e-local`
|
||||
For individual tests you can run them using the IDE run test feature
|
||||
|
||||
For individual tests you can run them using the IDE run test feature
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Tests fails to delete `argocd-e2e-ns-*` namespaces.**
|
||||
|
||||
@@ -47,7 +47,8 @@ metadata:
|
||||
* [Grafana](./grafana.md)
|
||||
* [Webhook](./webhook.md)
|
||||
* [Telegram](./telegram.md)
|
||||
* [Teams](./teams.md)
|
||||
* [Teams (Office 365 Connectors)](./teams.md) - Legacy service (deprecated, retires March 31, 2026)
|
||||
* [Teams Workflows](./teams-workflows.md) - Recommended replacement for Office 365 Connectors
|
||||
* [Google Chat](./googlechat.md)
|
||||
* [Rocket.Chat](./rocketchat.md)
|
||||
* [Pushover](./pushover.md)
|
||||
|
||||
@@ -62,6 +62,8 @@ The parameters for the PagerDuty configuration in the template generally match w
|
||||
* `group` - Logical grouping of components of a service.
|
||||
* `class` - The class/type of the event.
|
||||
* `url` - The URL that should be used for the link "View in ArgoCD" in PagerDuty.
|
||||
* `dedupKey` - A string used by PagerDuty to deduplicate and correlate events. Events with the same `dedupKey` will be grouped into the same incident. If omitted, PagerDuty will create a new incident for each event.
|
||||
|
||||
|
||||
The `timestamp` and `custom_details` parameters are not currently supported.
|
||||
|
||||
|
||||
370
docs/operator-manual/notifications/services/teams-workflows.md
Executable file
370
docs/operator-manual/notifications/services/teams-workflows.md
Executable file
@@ -0,0 +1,370 @@
|
||||
# Teams Workflows
|
||||
|
||||
## Overview
|
||||
|
||||
The Teams Workflows notification service sends message notifications using Microsoft Teams Workflows (Power Automate). This is the recommended replacement for the legacy Office 365 Connectors service, which will be retired on March 31, 2026.
|
||||
|
||||
## Parameters
|
||||
|
||||
The Teams Workflows notification service requires specifying the following settings:
|
||||
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://api.powerautomate.com/webhook/...`
|
||||
|
||||
## Supported Webhook URL Formats
|
||||
|
||||
The service supports the following Microsoft Teams Workflows webhook URL patterns:
|
||||
|
||||
- `https://api.powerautomate.com/...`
|
||||
- `https://api.powerplatform.com/...`
|
||||
- `https://flow.microsoft.com/...`
|
||||
- URLs containing `/powerautomate/` in the path
|
||||
|
||||
## Configuration
|
||||
|
||||
1. Open `Teams` and go to the channel you wish to set notifications for
|
||||
2. Click on the 3 dots next to the channel name
|
||||
3. Select`Workflows`
|
||||
4. Click on `Manage`
|
||||
5. Click `New flow`
|
||||
6. Write `Send webhook alerts to a channel` in the search bar or select it from the template list
|
||||
7. Choose your team and channel
|
||||
8. Configure the webhook name and settings
|
||||
9. Copy the webhook URL (it will be from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`)
|
||||
10. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-notifications-cm
|
||||
data:
|
||||
service.teams-workflows: |
|
||||
recipientUrls:
|
||||
channelName: $channel-workflows-url
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
channel-workflows-url: https://api.powerautomate.com/webhook/your-webhook-id
|
||||
```
|
||||
|
||||
11. Create subscription for your Teams Workflows integration:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
|
||||
```
|
||||
|
||||
## Channel Support
|
||||
|
||||
- ✅ Standard Teams channels
|
||||
- ✅ Shared channels (as of December 2025)
|
||||
- ✅ Private channels (as of December 2025)
|
||||
|
||||
Teams Workflows provides enhanced channel support compared to Office 365 Connectors, allowing you to post to shared and private channels in addition to standard channels.
|
||||
|
||||
## Adaptive Card Format
|
||||
|
||||
The Teams Workflows service uses **Adaptive Cards** exclusively, which is the modern, flexible card format for Microsoft Teams. All notifications are automatically converted to Adaptive Card format and wrapped in the required message envelope.
|
||||
|
||||
### Option 1: Using Template Fields (Recommended)
|
||||
|
||||
The service automatically converts template fields to Adaptive Card format. This is the simplest and most maintainable approach:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams-workflows:
|
||||
# ThemeColor supports Adaptive Card semantic colors: "Good", "Warning", "Attention", "Accent"
|
||||
# or hex colors like "#000080"
|
||||
themeColor: "Good"
|
||||
title: Application {{.app.metadata.name}} has been successfully synced
|
||||
text: Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.
|
||||
summary: "{{.app.metadata.name}} sync succeeded"
|
||||
facts: |
|
||||
[{
|
||||
"name": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
}, {
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}]
|
||||
sections: |
|
||||
[{
|
||||
"facts": [
|
||||
{
|
||||
"name": "Namespace",
|
||||
"value": "{{.app.metadata.namespace}}"
|
||||
},
|
||||
{
|
||||
"name": "Cluster",
|
||||
"value": "{{.app.spec.destination.server}}"
|
||||
}
|
||||
]
|
||||
}]
|
||||
potentialAction: |-
|
||||
[{
|
||||
"@type": "OpenUri",
|
||||
"name": "View in Argo CD",
|
||||
"targets": [{
|
||||
"os": "default",
|
||||
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
- `title` → Converted to a large, bold TextBlock
|
||||
- `text` → Converted to a regular TextBlock
|
||||
- `facts` → Converted to a FactSet element
|
||||
- `sections` → Facts within sections are extracted and converted to FactSet elements
|
||||
- `potentialAction` → OpenUri actions are converted to Action.OpenUrl
|
||||
- `themeColor` → Applied to the title TextBlock (supports semantic colors like "Good", "Warning", "Attention", "Accent" or hex colors)
|
||||
|
||||
### Option 2: Custom Adaptive Card JSON
|
||||
|
||||
For full control and advanced features, you can provide a complete Adaptive Card JSON template:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams-workflows:
|
||||
adaptiveCard: |
|
||||
{
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.4",
|
||||
"body": [
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}} synced successfully",
|
||||
"size": "Large",
|
||||
"weight": "Bolder",
|
||||
"color": "Good"
|
||||
},
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.",
|
||||
"wrap": true
|
||||
},
|
||||
{
|
||||
"type": "FactSet",
|
||||
"facts": [
|
||||
{
|
||||
"title": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "Action.OpenUrl",
|
||||
"title": "View in Argo CD",
|
||||
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** When using `adaptiveCard`, you only need to provide the AdaptiveCard JSON structure (not the full message envelope). The service automatically wraps it in the required `message` + `attachments` format for Teams Workflows.
|
||||
|
||||
**Important:** If you provide `adaptiveCard`, it takes precedence over all other template fields (`title`, `text`, `facts`, etc.).
|
||||
|
||||
## Template Fields
|
||||
|
||||
The Teams Workflows service supports the following template fields, which are automatically converted to Adaptive Card format:
|
||||
|
||||
### Standard Fields
|
||||
|
||||
- `title` - Message title (converted to large, bold TextBlock)
|
||||
- `text` - Message text content (converted to TextBlock)
|
||||
- `summary` - Summary text (currently not used in Adaptive Cards, but preserved for compatibility)
|
||||
- `themeColor` - Color for the title. Supports:
|
||||
- Semantic colors: `"Good"` (green), `"Warning"` (yellow), `"Attention"` (red), `"Accent"` (blue)
|
||||
- Hex colors: `"#000080"`, `"#FF0000"`, etc.
|
||||
- `facts` - JSON array of fact key-value pairs (converted to FactSet)
|
||||
```yaml
|
||||
facts: |
|
||||
[{
|
||||
"name": "Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
}]
|
||||
```
|
||||
- `sections` - JSON array of sections containing facts (facts are extracted and converted to FactSet)
|
||||
```yaml
|
||||
sections: |
|
||||
[{
|
||||
"facts": [{
|
||||
"name": "Namespace",
|
||||
"value": "{{.app.metadata.namespace}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
- `potentialAction` - JSON array of action buttons (OpenUri actions converted to Action.OpenUrl)
|
||||
```yaml
|
||||
potentialAction: |-
|
||||
[{
|
||||
"@type": "OpenUri",
|
||||
"name": "View Details",
|
||||
"targets": [{
|
||||
"os": "default",
|
||||
"uri": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
### Advanced Fields
|
||||
|
||||
- `adaptiveCard` - Complete Adaptive Card JSON template (takes precedence over all other fields)
|
||||
- Only provide the AdaptiveCard structure, not the message envelope
|
||||
- Supports full Adaptive Card 1.4 specification
|
||||
- Allows access to all Adaptive Card features (containers, columns, images, etc.)
|
||||
|
||||
- `template` - Raw JSON template (legacy, use `adaptiveCard` instead)
|
||||
|
||||
### Field Conversion Details
|
||||
|
||||
| Template Field | Adaptive Card Element | Notes |
|
||||
|---------------|----------------------|-------|
|
||||
| `title` | `TextBlock` with `size: "Large"`, `weight: "Bolder"` | ThemeColor applied to this element |
|
||||
| `text` | `TextBlock` with `wrap: true` | Uses `n.Message` if `text` is empty |
|
||||
| `facts` | `FactSet` | Each fact becomes a `title`/`value` pair |
|
||||
| `sections[].facts` | `FactSet` | Facts extracted from sections |
|
||||
| `potentialAction[OpenUri]` | `Action.OpenUrl` | Only OpenUri actions are converted |
|
||||
| `themeColor` | Applied to title `TextBlock.color` | Supports semantic and hex colors |
|
||||
|
||||
## Migration from Office 365 Connectors
|
||||
|
||||
If you're currently using the `teams` service with Office 365 Connectors, follow these steps to migrate:
|
||||
|
||||
1. **Create a new Workflows webhook** using the configuration steps above
|
||||
|
||||
2. **Update your service configuration:**
|
||||
- Change from `service.teams` to `service.teams-workflows`
|
||||
- Update the webhook URL to your new Workflows webhook URL
|
||||
|
||||
3. **Update your templates:**
|
||||
- Change `teams:` to `teams-workflows:` in your templates
|
||||
- Your existing template fields (`title`, `text`, `facts`, `sections`, `potentialAction`) will automatically be converted to Adaptive Card format
|
||||
- No changes needed to your template structure - the conversion is automatic
|
||||
|
||||
4. **Update your subscriptions:**
|
||||
```yaml
|
||||
# Old
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
|
||||
|
||||
# New
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams-workflows: channelName
|
||||
```
|
||||
|
||||
5. **Test and verify:**
|
||||
- Send a test notification to verify it works correctly
|
||||
- Once verified, you can remove the old Office 365 Connector configuration
|
||||
|
||||
**Note:** Your existing templates will work without modification. The service automatically converts your template fields to Adaptive Card format, so you get the benefits of modern cards without changing your templates.
|
||||
|
||||
## Differences from Office 365 Connectors
|
||||
|
||||
| Feature | Office 365 Connectors | Teams Workflows |
|
||||
|---------|----------------------|-----------------|
|
||||
| Service Name | `teams` | `teams-workflows` |
|
||||
| Standard Channels | ✅ | ✅ |
|
||||
| Shared Channels | ❌ | ✅ (Dec 2025+) |
|
||||
| Private Channels | ❌ | ✅ (Dec 2025+) |
|
||||
| Card Format | messageCard (legacy) | Adaptive Cards (modern) |
|
||||
| Template Conversion | N/A | Automatic conversion from template fields |
|
||||
| Retirement Date | March 31, 2026 | Active |
|
||||
|
||||
## Adaptive Card Features
|
||||
|
||||
The Teams Workflows service leverages Adaptive Cards, which provide:
|
||||
|
||||
- **Rich Content**: Support for text, images, fact sets, and more
|
||||
- **Flexible Layout**: Containers, columns, and adaptive layouts
|
||||
- **Interactive Elements**: Action buttons, input fields, and more
|
||||
- **Semantic Colors**: Built-in color schemes (Good, Warning, Attention, Accent)
|
||||
- **Cross-Platform**: Works across Teams, Outlook, and other Microsoft 365 apps
|
||||
|
||||
### Example: Advanced Adaptive Card Template
|
||||
|
||||
For complex notifications, you can use the full Adaptive Card specification:
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded-advanced: |
|
||||
teams-workflows:
|
||||
adaptiveCard: |
|
||||
{
|
||||
"type": "AdaptiveCard",
|
||||
"version": "1.4",
|
||||
"body": [
|
||||
{
|
||||
"type": "Container",
|
||||
"items": [
|
||||
{
|
||||
"type": "ColumnSet",
|
||||
"columns": [
|
||||
{
|
||||
"type": "Column",
|
||||
"width": "auto",
|
||||
"items": [
|
||||
{
|
||||
"type": "Image",
|
||||
"url": "https://example.com/success-icon.png",
|
||||
"size": "Small"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "Column",
|
||||
"width": "stretch",
|
||||
"items": [
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Application {{.app.metadata.name}}",
|
||||
"weight": "Bolder",
|
||||
"size": "Large"
|
||||
},
|
||||
{
|
||||
"type": "TextBlock",
|
||||
"text": "Successfully synced",
|
||||
"spacing": "None",
|
||||
"isSubtle": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "FactSet",
|
||||
"facts": [
|
||||
{
|
||||
"title": "Status",
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"type": "Action.OpenUrl",
|
||||
"title": "View in Argo CD",
|
||||
"url": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -1,18 +1,46 @@
|
||||
# Teams
|
||||
# Teams (Office 365 Connectors)
|
||||
|
||||
## ⚠️ Deprecation Notice
|
||||
|
||||
**Office 365 Connectors are being retired by Microsoft.**
|
||||
|
||||
Microsoft is retiring the Office 365 Connectors service in Teams. The service will be fully retired by **March 31, 2026** (extended from the original timeline of December 2025).
|
||||
|
||||
### What this means:
|
||||
- **Old Office 365 Connectors** (webhook URLs from `webhook.office.com`) will stop working after the retirement date
|
||||
- **New Power Automate Workflows** (webhook URLs from `api.powerautomate.com`, `api.powerplatform.com`, or `flow.microsoft.com`) are the recommended replacement
|
||||
|
||||
### Migration Required:
|
||||
If you are currently using Office 365 Connectors (Incoming Webhook), you should migrate to Power Automate Workflows before the retirement date. The notifications-engine automatically detects the webhook type and handles both formats, but you should plan your migration.
|
||||
|
||||
**Migration Resources:**
|
||||
- [Microsoft Deprecation Notice](https://devblogs.microsoft.com/microsoft365dev/retirement-of-office-365-connectors-within-microsoft-teams/)
|
||||
- [Create incoming webhooks with Workflows for Microsoft Teams](https://support.microsoft.com/en-us/office/create-incoming-webhooks-with-workflows-for-microsoft-teams-8ae491c7-0394-4861-ba59-055e33f75498)
|
||||
|
||||
---
|
||||
|
||||
## Parameters
|
||||
|
||||
The Teams notification service send message notifications using Teams bot and requires specifying the following settings:
|
||||
The Teams notification service sends message notifications using Office 365 Connectors and requires specifying the following settings:
|
||||
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://example.com`
|
||||
* `recipientUrls` - the webhook url map, e.g. `channelName: https://outlook.office.com/webhook/...`
|
||||
|
||||
> **⚠️ Deprecation Notice:** Office 365 Connectors will be retired by Microsoft on **March 31, 2026**. We recommend migrating to the [Teams Workflows service](./teams-workflows.md) for continued support and enhanced features.
|
||||
|
||||
## Configuration
|
||||
|
||||
> **💡 For Power Automate Workflows (Recommended):** See the [Teams Workflows documentation](./teams-workflows.md) for detailed configuration instructions.
|
||||
|
||||
### Office 365 Connectors (Deprecated - Retiring March 31, 2026)
|
||||
|
||||
> **⚠️ Warning:** This method is deprecated and will stop working after March 31, 2026. Please migrate to Power Automate Workflows.
|
||||
|
||||
1. Open `Teams` and goto `Apps`
|
||||
2. Find `Incoming Webhook` microsoft app and click on it
|
||||
3. Press `Add to a team` -> select team and channel -> press `Set up a connector`
|
||||
4. Enter webhook name and upload image (optional)
|
||||
5. Press `Create` then copy webhook url and store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
5. Press `Create` then copy webhook url (it will be from `webhook.office.com`)
|
||||
6. Store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm`
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -31,10 +59,20 @@ kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
channel-teams-url: https://example.com
|
||||
channel-teams-url: https://webhook.office.com/webhook/your-webhook-id # Office 365 Connector (deprecated)
|
||||
```
|
||||
|
||||
6. Create subscription for your Teams integration:
|
||||
> **Note:** For Power Automate Workflows webhooks, use the [Teams Workflows service](./teams-workflows.md) instead.
|
||||
|
||||
### Webhook Type Detection
|
||||
|
||||
The `teams` service supports Office 365 Connectors (deprecated):
|
||||
|
||||
- **Office 365 Connectors**: URLs from `webhook.office.com` (deprecated)
|
||||
- Requires response body to be exactly `"1"` for success
|
||||
- Will stop working after March 31, 2026
|
||||
|
||||
7. Create subscription for your Teams integration:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
@@ -44,12 +82,20 @@ metadata:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.teams: channelName
|
||||
```
|
||||
|
||||
## Channel Support
|
||||
|
||||
- ✅ Standard Teams channels only
|
||||
|
||||
> **Note:** Office 365 Connectors only support standard Teams channels. For shared channels or private channels, use the [Teams Workflows service](./teams-workflows.md).
|
||||
|
||||
## Templates
|
||||
|
||||

|
||||
|
||||
[Notification templates](../templates.md) can be customized to leverage teams message sections, facts, themeColor, summary and potentialAction [feature](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using).
|
||||
|
||||
The Teams service uses the **messageCard** format (MessageCard schema) which is compatible with Office 365 Connectors.
|
||||
|
||||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
teams:
|
||||
@@ -124,3 +170,7 @@ template.app-sync-succeeded: |
|
||||
teams:
|
||||
summary: "Sync Succeeded"
|
||||
```
|
||||
|
||||
## Migration to Teams Workflows
|
||||
|
||||
If you're currently using Office 365 Connectors, see the [Teams Workflows documentation](./teams-workflows.md) for migration instructions and enhanced features.
|
||||
|
||||
@@ -78,6 +78,29 @@ metadata:
|
||||
notifications.argoproj.io/subscribe.<trigger-name>.<webhook-name>: ""
|
||||
```
|
||||
|
||||
4. TLS configuration (optional)
|
||||
|
||||
If your webhook server uses a custom TLS certificate, you can configure the notification service to trust it by adding the certificate to the `argocd-tls-certs-cm` ConfigMap as shown below:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-tls-certs-cm
|
||||
data:
|
||||
<hostname>: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
<TLS DATA>
|
||||
-----END CERTIFICATE-----
|
||||
```
|
||||
|
||||
*NOTE:*
|
||||
*If the custom certificate is not trusted, you may encounter errors such as:*
|
||||
```
|
||||
Put \"https://...\": x509: certificate signed by unknown authority
|
||||
```
|
||||
*Adding the server's certificate to `argocd-tls-certs-cm` resolves this issue.*
|
||||
|
||||
## Examples
|
||||
|
||||
### Set GitHub commit status
|
||||
|
||||
@@ -1,2 +1,5 @@
|
||||
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
|
||||
version.
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 3.3 | v1.34, v1.33, v1.32, v1.31 |
|
||||
| 3.2 | v1.34, v1.33, v1.32, v1.31 |
|
||||
| 3.1 | v1.34, v1.33, v1.32, v1.31 |
|
||||
|
||||
@@ -84,16 +84,16 @@ From now onwards, the Kubernetes server-side timeout is controlled by a separate
|
||||
The `--self-heal-backoff-cooldown-seconds` flag of the `argocd-application-controller` has been deprecated and will be
|
||||
removed in a future release.
|
||||
|
||||
## Helm Upgraded to 3.19.2
|
||||
## Helm Upgraded to 3.19.4
|
||||
|
||||
Argo CD v3.3 upgrades the bundled Helm version to 3.19.2. There are no breaking changes in Helm 3.19.2 according to the
|
||||
Argo CD v3.3 upgrades the bundled Helm version to 3.19.4. There are no breaking changes in Helm 3.19.4 according to the
|
||||
[release notes](https://github.com/helm/helm/releases/tag/v3.19.4).
|
||||
|
||||
## Kustomize Upgraded to 5.8.0
|
||||
## Kustomize Upgraded to 5.8.1
|
||||
|
||||
Argo CD v3.3 upgrades the bundled Kustomize version from v5.7.0 to v5.8.0. According to the
|
||||
Argo CD v3.3 upgrades the bundled Kustomize version from v5.7.0 to v5.8.1. According to the
|
||||
[5.7.1](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv5.7.1)
|
||||
and [5.8.0](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv5.8.0) release notes, there are no breaking changes.
|
||||
and [5.8.1](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv5.8.1) release notes, there are no breaking changes.
|
||||
|
||||
However, note that Kustomize 5.7.1 introduces code to replace the `shlex` library used for parsing arguments in exec plugins.
|
||||
If any existing manifests become corrupted, please follow the instructions in the
|
||||
@@ -111,4 +111,4 @@ If you rely on Helm charts within kustomization files, please review the details
|
||||
* [keda.sh/ScaledJob](https://github.com/argoproj/argo-cd/commit/e58bdf2f87b5b60a05fde0b7837779061b170c08)
|
||||
* [services.cloud.sap.com/ServiceBinding](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
|
||||
* [services.cloud.sap.com/ServiceInstance](https://github.com/argoproj/argo-cd/commit/51c9add05d9bc8f8fafc1631968eb853db53a904)
|
||||
* [\_.cnrm.cloud.google.com/\_](https://github.com/argoproj/argo-cd/commit/30abebda3d930d93065eec8864aac7e0d56ae119)
|
||||
* [\_.cnrm.cloud.google.com/\_](https://github.com/argoproj/argo-cd/commit/30abebda3d930d93065eec8864aac7e0d56ae119)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/argoproj/gitops-engine
|
||||
|
||||
go 1.24.0
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
@@ -22,7 +22,7 @@ require (
|
||||
k8s.io/kube-aggregator v0.34.0
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
k8s.io/kubectl v0.34.0
|
||||
k8s.io/kubernetes v1.34.0
|
||||
k8s.io/kubernetes v1.34.2
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.1-0.20251003215857-446d8398e19c
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
@@ -247,8 +247,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs=
|
||||
k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4=
|
||||
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
|
||||
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
|
||||
k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
|
||||
k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
|
||||
@@ -1168,8 +1168,8 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.R
|
||||
|
||||
var err error
|
||||
var message string
|
||||
shouldReplace := sc.replace || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionReplace)
|
||||
force := sc.force || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionForce)
|
||||
shouldReplace := sc.replace || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionReplace) || (t.liveObj != nil && resourceutil.HasAnnotationOption(t.liveObj, common.AnnotationSyncOptions, common.SyncOptionReplace))
|
||||
force := sc.force || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionForce) || (t.liveObj != nil && resourceutil.HasAnnotationOption(t.liveObj, common.AnnotationSyncOptions, common.SyncOptionForce))
|
||||
serverSideApply := sc.shouldUseServerSideApply(t.targetObj, dryRun)
|
||||
|
||||
// Check if we need to perform client-side apply migration for server-side apply
|
||||
|
||||
@@ -881,6 +881,7 @@ func TestSync_Replace(t *testing.T) {
|
||||
}{
|
||||
{"NoAnnotation", testingutils.NewPod(), testingutils.NewPod(), "apply"},
|
||||
{"AnnotationIsSet", withReplaceAnnotation(testingutils.NewPod()), testingutils.NewPod(), "replace"},
|
||||
{"AnnotationIsSetOnLive", testingutils.NewPod(), withReplaceAnnotation(testingutils.NewPod()), "replace"},
|
||||
{"LiveObjectMissing", withReplaceAnnotation(testingutils.NewPod()), nil, "create"},
|
||||
}
|
||||
|
||||
@@ -1047,6 +1048,7 @@ func TestSync_Force(t *testing.T) {
|
||||
{"NoAnnotation", testingutils.NewPod(), testingutils.NewPod(), "apply", false},
|
||||
{"ForceApplyAnnotationIsSet", withForceAnnotation(testingutils.NewPod()), testingutils.NewPod(), "apply", true},
|
||||
{"ForceReplaceAnnotationIsSet", withForceAndReplaceAnnotations(testingutils.NewPod()), testingutils.NewPod(), "replace", true},
|
||||
{"ForceReplaceAnnotationIsSetOnLive", testingutils.NewPod(), withForceAndReplaceAnnotations(testingutils.NewPod()), "replace", true},
|
||||
{"LiveObjectMissing", withReplaceAnnotation(testingutils.NewPod()), nil, "create", false},
|
||||
}
|
||||
|
||||
|
||||
10
go.mod
10
go.mod
@@ -1,6 +1,6 @@
|
||||
module github.com/argoproj/argo-cd/v3
|
||||
|
||||
go 1.25.0
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
code.gitea.io/sdk/gitea v0.22.1
|
||||
@@ -13,7 +13,7 @@ require (
|
||||
github.com/TomOnTime/utfutil v1.0.0
|
||||
github.com/alicebob/miniredis/v2 v2.35.0
|
||||
github.com/argoproj/gitops-engine v0.7.1-0.20250908182407-97ad5b59a627
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20251129223737-e2e7fe18381a
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630
|
||||
github.com/argoproj/pkg v0.13.6
|
||||
github.com/argoproj/pkg/v2 v2.0.1
|
||||
github.com/aws/aws-sdk-go v1.55.7
|
||||
@@ -22,6 +22,7 @@ require (
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.17.0
|
||||
github.com/casbin/casbin/v2 v2.135.0
|
||||
github.com/casbin/govaluate v1.10.0
|
||||
github.com/cenkalti/backoff/v5 v5.0.3
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/chainguard-dev/git-urls v1.0.2
|
||||
github.com/coreos/go-oidc/v3 v3.14.1
|
||||
@@ -29,7 +30,7 @@ require (
|
||||
github.com/dlclark/regexp2 v1.11.5
|
||||
github.com/dustin/go-humanize v1.0.1
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible
|
||||
github.com/expr-lang/expr v1.17.6
|
||||
github.com/expr-lang/expr v1.17.7
|
||||
github.com/felixge/httpsnoop v1.0.4
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/gfleury/go-bitbucket-v1 v0.0.0-20240917142304-df385efaac68
|
||||
@@ -160,7 +161,6 @@ require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.3 // indirect
|
||||
github.com/clipperhouse/displaywidth v0.6.0 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
@@ -304,7 +304,7 @@ require (
|
||||
k8s.io/controller-manager v0.34.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kube-aggregator v0.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.0 // indirect
|
||||
k8s.io/kubernetes v1.34.2 // indirect
|
||||
nhooyr.io/websocket v1.8.7 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
|
||||
12
go.sum
12
go.sum
@@ -115,8 +115,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20251129223737-e2e7fe18381a h1:tAyJp5VIEKM5OUUJJIDwSGMgYPwcSE6SAtAQ2ykVU30=
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20251129223737-e2e7fe18381a/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630 h1:naE5KNRTOALjF5nVIGUHrHU5xjlB8QJJiCu+aISIlSs=
|
||||
github.com/argoproj/notifications-engine v0.5.1-0.20260119155007-a23b5827d630/go.mod h1:d1RazGXWvKRFv9//rg4MRRR7rbvbE7XLgTSMT5fITTE=
|
||||
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
|
||||
github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8=
|
||||
github.com/argoproj/pkg/v2 v2.0.1 h1:O/gCETzB/3+/hyFL/7d/VM/6pSOIRWIiBOTb2xqAHvc=
|
||||
@@ -261,8 +261,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/expr-lang/expr v1.17.6 h1:1h6i8ONk9cexhDmowO/A64VPxHScu7qfSl2k8OlINec=
|
||||
github.com/expr-lang/expr v1.17.6/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
|
||||
github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
|
||||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
|
||||
@@ -1461,8 +1461,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs=
|
||||
k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4=
|
||||
k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w=
|
||||
k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto=
|
||||
k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
|
||||
k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/argoproj/argo-cd/get-previous-release
|
||||
|
||||
go 1.23.5
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.9.0
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
d9c9b1fc499c54282c4127c60cdd506da2c6202506b708a2b45fb6dfdb318f43 helm-v3.19.4-darwin-amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
7e82ca63fe80a298cecefad61d0c10bc47963ff3551e94ab6470be6393a6a74b helm-v3.19.4-darwin-arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
759c656fbd9c11e6a47784ecbeac6ad1eb16a9e76d202e51163ab78504848862 helm-v3.19.4-linux-amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
9e1064f5de43745bdedbff2722a1674d0397bc4b4d8d8196d52a2b730909fe62 helm-v3.19.4-linux-arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
a38d8f75406f9bc3e12d1ebf8819fd563a5156ada6fe665402732932eec9c743 helm-v3.19.4-linux-ppc64le.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
d153b3a316ce3f2936e601d94db5909aae4fbd5d1a4b28760fad2dd18c2bb749 helm-v3.19.4-linux-s390x.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
ee7cf0c1e3592aa7bb66ba82b359933a95e7f2e0b36e5f53ed0a4535b017f2f8 kustomize_5.8.1_darwin_amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
8886f8a78474e608cc81234f729fda188a9767da23e28925802f00ece2bab288 kustomize_5.8.1_darwin_arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
029a7f0f4e1932c52a0476cf02a0fd855c0bb85694b82c338fc648dcb53a819d kustomize_5.8.1_linux_amd64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
0953ea3e476f66d6ddfcd911d750f5167b9365aa9491b2326398e289fef2c142 kustomize_5.8.1_linux_arm64.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
87ffa6d248d6baceb35463042da354a317bfd3ee49afc7f9850c05c36319c708 kustomize_5.8.1_linux_ppc64le.tar.gz
|
||||
@@ -0,0 +1 @@
|
||||
12df0fcec017a82b41d87b85c53263ae9657740b130eba42381bae3495521c9b kustomize_5.8.1_linux_s390x.tar.gz
|
||||
@@ -9,6 +9,10 @@ which gotestsum || go install gotest.tools/gotestsum@latest
|
||||
|
||||
TEST_RESULTS=${TEST_RESULTS:-test-results}
|
||||
TEST_FLAGS=${TEST_FLAGS:-}
|
||||
DIST_DIR=${DIST_DIR:-dist}
|
||||
|
||||
# Add DIST_DIR to PATH so binaries installed for argo are found first
|
||||
export PATH="${DIST_DIR}:${PATH}"
|
||||
|
||||
if test "${ARGOCD_TEST_PARALLELISM:-}" != ""; then
|
||||
TEST_FLAGS="$TEST_FLAGS -p $ARGOCD_TEST_PARALLELISM"
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
# Use ./hack/installers/checksums/add-helm-checksums.sh and
|
||||
# add-kustomize-checksums.sh to help download checksums.
|
||||
###############################################################################
|
||||
helm3_version=3.19.2
|
||||
kustomize5_version=5.8.0
|
||||
helm3_version=3.19.4
|
||||
kustomize5_version=5.8.1
|
||||
protoc_version=29.3
|
||||
oras_version=1.2.0
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.3.1
|
||||
|
||||
@@ -5,7 +5,7 @@ kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.3.1
|
||||
resources:
|
||||
- ./application-controller
|
||||
- ./dex
|
||||
|
||||
12
manifests/core-install-with-hydrator.yaml
generated
12
manifests/core-install-with-hydrator.yaml
generated
@@ -31273,7 +31273,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31408,7 +31408,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -31536,7 +31536,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -31833,7 +31833,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -31886,7 +31886,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32234,7 +32234,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
10
manifests/core-install.yaml
generated
10
manifests/core-install.yaml
generated
@@ -31241,7 +31241,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31370,7 +31370,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -31667,7 +31667,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -31720,7 +31720,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32068,7 +32068,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -12,4 +12,4 @@ resources:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.3.1
|
||||
|
||||
@@ -12,7 +12,7 @@ patches:
|
||||
images:
|
||||
- name: quay.io/argoproj/argocd
|
||||
newName: quay.io/argoproj/argocd
|
||||
newTag: latest
|
||||
newTag: v3.3.1
|
||||
resources:
|
||||
- ../../base/application-controller
|
||||
- ../../base/applicationset-controller
|
||||
|
||||
18
manifests/ha/install-with-hydrator.yaml
generated
18
manifests/ha/install-with-hydrator.yaml
generated
@@ -32639,7 +32639,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -32774,7 +32774,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32925,7 +32925,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -33021,7 +33021,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -33145,7 +33145,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -33468,7 +33468,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -33521,7 +33521,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -33895,7 +33895,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -34279,7 +34279,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/install.yaml
generated
16
manifests/ha/install.yaml
generated
@@ -32609,7 +32609,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -32761,7 +32761,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -32857,7 +32857,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -32981,7 +32981,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -33304,7 +33304,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -33357,7 +33357,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -33731,7 +33731,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -34115,7 +34115,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
18
manifests/ha/namespace-install-with-hydrator.yaml
generated
@@ -1897,7 +1897,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2032,7 +2032,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2183,7 +2183,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2279,7 +2279,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2403,7 +2403,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2726,7 +2726,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2779,7 +2779,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -3153,7 +3153,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3537,7 +3537,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/ha/namespace-install.yaml
generated
16
manifests/ha/namespace-install.yaml
generated
@@ -1867,7 +1867,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -2019,7 +2019,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -2115,7 +2115,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -2239,7 +2239,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -2562,7 +2562,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -2615,7 +2615,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2989,7 +2989,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -3373,7 +3373,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/install-with-hydrator.yaml
generated
18
manifests/install-with-hydrator.yaml
generated
@@ -31717,7 +31717,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31852,7 +31852,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32003,7 +32003,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -32099,7 +32099,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -32201,7 +32201,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -32498,7 +32498,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32551,7 +32551,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32923,7 +32923,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -33307,7 +33307,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/install.yaml
generated
16
manifests/install.yaml
generated
@@ -31685,7 +31685,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -31837,7 +31837,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -31933,7 +31933,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -32035,7 +32035,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -32332,7 +32332,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -32385,7 +32385,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -32757,7 +32757,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -33141,7 +33141,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
18
manifests/namespace-install-with-hydrator.yaml
generated
18
manifests/namespace-install-with-hydrator.yaml
generated
@@ -975,7 +975,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1110,7 +1110,7 @@ spec:
|
||||
key: log.format.timestamp
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1261,7 +1261,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1357,7 +1357,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1459,7 +1459,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1756,7 +1756,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1809,7 +1809,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2181,7 +2181,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2565,7 +2565,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
16
manifests/namespace-install.yaml
generated
16
manifests/namespace-install.yaml
generated
@@ -943,7 +943,7 @@ spec:
|
||||
key: applicationsetcontroller.status.max.resources.count
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-applicationset-controller
|
||||
ports:
|
||||
@@ -1095,7 +1095,7 @@ spec:
|
||||
- -n
|
||||
- /usr/local/bin/argocd
|
||||
- /shared/argocd-dex
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: copyutil
|
||||
securityContext:
|
||||
@@ -1191,7 +1191,7 @@ spec:
|
||||
key: notificationscontroller.repo.server.plaintext
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
@@ -1293,7 +1293,7 @@ spec:
|
||||
- argocd
|
||||
- admin
|
||||
- redis-initial-password
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: secret-init
|
||||
securityContext:
|
||||
@@ -1590,7 +1590,7 @@ spec:
|
||||
value: /helm-working-dir
|
||||
- name: HELM_DATA_HOME
|
||||
value: /helm-working-dir
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
@@ -1643,7 +1643,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
name: copyutil
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -2015,7 +2015,7 @@ spec:
|
||||
key: server.sync.replace.allowed
|
||||
name: argocd-cmd-params-cm
|
||||
optional: true
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -2399,7 +2399,7 @@ spec:
|
||||
optional: true
|
||||
- name: KUBECACHEDIR
|
||||
value: /tmp/kubecache
|
||||
image: quay.io/argoproj/argocd:latest
|
||||
image: quay.io/argoproj/argocd:v3.3.1
|
||||
imagePullPolicy: Always
|
||||
name: argocd-application-controller
|
||||
ports:
|
||||
|
||||
@@ -94,6 +94,7 @@ nav:
|
||||
- operator-manual/notifications/services/pushover.md
|
||||
- operator-manual/notifications/services/rocketchat.md
|
||||
- operator-manual/notifications/services/slack.md
|
||||
- operator-manual/notifications/services/teams-workflows.md
|
||||
- operator-manual/notifications/services/teams.md
|
||||
- operator-manual/notifications/services/telegram.md
|
||||
- operator-manual/notifications/services/webex.md
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
package apiclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func Test_parseHeaders(t *testing.T) {
|
||||
@@ -39,3 +49,234 @@ func Test_parseGRPCHeaders(t *testing.T) {
|
||||
assert.ErrorContains(t, err, "additional headers must be colon(:)-separated: foo")
|
||||
})
|
||||
}
|
||||
|
||||
func TestExecuteRequest_ClosesBodyOnHTTPError(t *testing.T) {
|
||||
bodyClosed := &atomic.Bool{}
|
||||
|
||||
// Create a test server that returns HTTP 500 error
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create client with custom httpClient that tracks body closure
|
||||
originalTransport := http.DefaultTransport
|
||||
customTransport := &testTransport{
|
||||
base: originalTransport,
|
||||
bodyClosed: bodyClosed,
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:], // Remove "http://"
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Execute request that should fail with HTTP 500
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
_, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
|
||||
|
||||
// Verify error was returned
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed with status code 500")
|
||||
|
||||
// Give a small delay to ensure Close() was called
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify body was closed to prevent connection leak
|
||||
assert.True(t, bodyClosed.Load(), "response body should be closed on HTTP error to prevent connection leak")
|
||||
}
|
||||
|
||||
func TestExecuteRequest_ClosesBodyOnGRPCError(t *testing.T) {
|
||||
bodyClosed := &atomic.Bool{}
|
||||
|
||||
// Create a test server that returns HTTP 200 but with gRPC error status
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.Header().Set("Grpc-Status", "3") // codes.InvalidArgument
|
||||
w.Header().Set("Grpc-Message", "invalid argument")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create client with custom httpClient that tracks body closure
|
||||
originalTransport := http.DefaultTransport
|
||||
customTransport := &testTransport{
|
||||
base: originalTransport,
|
||||
bodyClosed: bodyClosed,
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:], // Remove "http://"
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Execute request that should fail with gRPC error
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
_, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
|
||||
|
||||
// Verify gRPC error was returned
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid argument")
|
||||
|
||||
// Give a small delay to ensure Close() was called
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify body was closed to prevent connection leak
|
||||
assert.True(t, bodyClosed.Load(), "response body should be closed on gRPC error to prevent connection leak")
|
||||
}
|
||||
|
||||
func TestExecuteRequest_ConcurrentErrorRequests_NoConnectionLeak(t *testing.T) {
|
||||
// This test simulates the scenario from the test script:
|
||||
// Multiple concurrent requests that fail should all close their response bodies
|
||||
|
||||
var totalRequests atomic.Int32
|
||||
var closedBodies atomic.Int32
|
||||
|
||||
// Create a test server that always returns errors
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
totalRequests.Add(1)
|
||||
// Alternate between HTTP errors and gRPC errors
|
||||
if totalRequests.Load()%2 == 0 {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
w.Header().Set("Grpc-Status", strconv.Itoa(int(codes.PermissionDenied)))
|
||||
w.Header().Set("Grpc-Message", "permission denied")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create client with custom transport that tracks closures
|
||||
customTransport := &testTransport{
|
||||
base: http.DefaultTransport,
|
||||
bodyClosed: &atomic.Bool{},
|
||||
onClose: func() {
|
||||
closedBodies.Add(1)
|
||||
},
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:],
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Simulate concurrent requests like in the test script
|
||||
concurrency := 10
|
||||
iterations := 5
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for iter := 0; iter < iterations; iter++ {
|
||||
for i := 0; i < concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
_, err := c.executeRequest(ctx, "/application.ApplicationService/ManagedResources", []byte("test"), md)
|
||||
// We expect errors
|
||||
assert.Error(t, err)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Give time for all Close() calls to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify all response bodies were closed
|
||||
expectedTotal := int32(concurrency * iterations)
|
||||
assert.Equal(t, expectedTotal, totalRequests.Load(), "all requests should have been made")
|
||||
assert.Equal(t, expectedTotal, closedBodies.Load(), "all response bodies should be closed to prevent connection leaks")
|
||||
}
|
||||
|
||||
func TestExecuteRequest_SuccessDoesNotCloseBodyPrematurely(t *testing.T) {
|
||||
// Verify that successful requests do NOT close the body in executeRequest
|
||||
// (caller is responsible for closing in success case)
|
||||
|
||||
bodyClosed := &atomic.Bool{}
|
||||
|
||||
// Create a test server that returns success
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.Header().Set("Grpc-Status", "0") // codes.OK
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
customTransport := &testTransport{
|
||||
base: http.DefaultTransport,
|
||||
bodyClosed: bodyClosed,
|
||||
}
|
||||
|
||||
c := &client{
|
||||
ServerAddr: server.URL[7:],
|
||||
PlainText: true,
|
||||
httpClient: &http.Client{
|
||||
Transport: customTransport,
|
||||
},
|
||||
GRPCWebRootPath: "",
|
||||
}
|
||||
|
||||
// Execute successful request
|
||||
ctx := context.Background()
|
||||
md := metadata.New(map[string]string{})
|
||||
resp, err := c.executeRequest(ctx, "/test.Service/Method", []byte("test"), md)
|
||||
|
||||
// Verify success
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Verify body was NOT closed by executeRequest (caller's responsibility)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
assert.False(t, bodyClosed.Load(), "response body should NOT be closed by executeRequest on success - caller is responsible")
|
||||
}
|
||||
|
||||
// testTransport wraps http.RoundTripper to track body closures
|
||||
type testTransport struct {
|
||||
base http.RoundTripper
|
||||
bodyClosed *atomic.Bool
|
||||
onClose func() // Optional callback for each close
|
||||
}
|
||||
|
||||
func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
resp, err := t.base.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wrap the response body to track Close() calls
|
||||
resp.Body = &closeTracker{
|
||||
ReadCloser: resp.Body,
|
||||
closed: t.bodyClosed,
|
||||
onClose: t.onClose,
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type closeTracker struct {
|
||||
io.ReadCloser
|
||||
closed *atomic.Bool
|
||||
onClose func()
|
||||
}
|
||||
|
||||
func (c *closeTracker) Close() error {
|
||||
c.closed.Store(true)
|
||||
if c.onClose != nil {
|
||||
c.onClose()
|
||||
}
|
||||
return c.ReadCloser.Close()
|
||||
}
|
||||
|
||||
@@ -86,6 +86,9 @@ func (c *client) executeRequest(ctx context.Context, fullMethodName string, msg
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.Body != nil {
|
||||
utilio.Close(resp.Body)
|
||||
}
|
||||
return nil, fmt.Errorf("%s %s failed with status code %d", req.Method, req.URL, resp.StatusCode)
|
||||
}
|
||||
var code codes.Code
|
||||
@@ -97,6 +100,9 @@ func (c *client) executeRequest(ctx context.Context, fullMethodName string, msg
|
||||
code = codes.Code(statusInt)
|
||||
}
|
||||
if code != codes.OK {
|
||||
if resp.Body != nil {
|
||||
utilio.Close(resp.Body)
|
||||
}
|
||||
return nil, status.Error(code, resp.Header.Get("Grpc-Message"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,6 +104,10 @@ type ApplicationSpec struct {
|
||||
type IgnoreDifferences []ResourceIgnoreDifferences
|
||||
|
||||
func (id IgnoreDifferences) Equals(other IgnoreDifferences) bool {
|
||||
// Treat nil and empty slice as equivalent
|
||||
if len(id) == 0 && len(other) == 0 {
|
||||
return true
|
||||
}
|
||||
return reflect.DeepEqual(id, other)
|
||||
}
|
||||
|
||||
|
||||
@@ -4865,3 +4865,72 @@ func TestSourceHydrator_Equals(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIgnoreDifferences_Equals(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a IgnoreDifferences
|
||||
b IgnoreDifferences
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "nil and nil are equal",
|
||||
a: nil,
|
||||
b: nil,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "nil and empty slice are equal",
|
||||
a: nil,
|
||||
b: IgnoreDifferences{},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "empty slice and nil are equal",
|
||||
a: IgnoreDifferences{},
|
||||
b: nil,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "empty slice and empty slice are equal",
|
||||
a: IgnoreDifferences{},
|
||||
b: IgnoreDifferences{},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "non-empty slice and nil are not equal",
|
||||
a: IgnoreDifferences{{Kind: "Deployment"}},
|
||||
b: nil,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "nil and non-empty slice are not equal",
|
||||
a: nil,
|
||||
b: IgnoreDifferences{{Kind: "Deployment"}},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "equal non-empty slices are equal",
|
||||
a: IgnoreDifferences{{Kind: "Deployment", JSONPointers: []string{"/spec/replicas"}}},
|
||||
b: IgnoreDifferences{{Kind: "Deployment", JSONPointers: []string{"/spec/replicas"}}},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "different non-empty slices are not equal",
|
||||
a: IgnoreDifferences{{Kind: "Deployment"}},
|
||||
b: IgnoreDifferences{{Kind: "Service"}},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
testCopy := testCase
|
||||
t.Run(testCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
assert.Equal(t, testCopy.expected, testCopy.a.Equals(testCopy.b))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2455,7 +2455,7 @@ func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.
|
||||
}
|
||||
log.Warnf("failed to set operation for app %q due to update conflict. retrying again...", *termOpReq.Name)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
_, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
|
||||
a, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting application by name: %w", err)
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
k8sbatchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -4582,3 +4583,75 @@ func TestServerSideDiff(t *testing.T) {
|
||||
assert.Contains(t, err.Error(), "application")
|
||||
})
|
||||
}
|
||||
|
||||
// TestTerminateOperationWithConflicts tests that TerminateOperation properly handles
|
||||
// concurrent update conflicts by retrying with the fresh application object.
|
||||
//
|
||||
// This test reproduces a bug where the retry loop discards the fresh app object
|
||||
// fetched from Get(), causing all retries to fail with stale resource versions.
|
||||
func TestTerminateOperationWithConflicts(t *testing.T) {
|
||||
testApp := newTestApp()
|
||||
testApp.ResourceVersion = "1"
|
||||
testApp.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
}
|
||||
testApp.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: *testApp.Operation,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
|
||||
appServer := newTestAppServer(t, testApp)
|
||||
ctx := context.Background()
|
||||
|
||||
// Get the fake clientset from the deepCopy wrapper
|
||||
fakeAppCs := appServer.appclientset.(*deepCopyAppClientset).GetUnderlyingClientSet().(*apps.Clientset)
|
||||
|
||||
getCallCount := 0
|
||||
updateCallCount := 0
|
||||
|
||||
// Remove default reactors and add our custom ones
|
||||
fakeAppCs.ReactionChain = nil
|
||||
|
||||
// Mock Get to return original version first, then fresh version
|
||||
fakeAppCs.AddReactor("get", "applications", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
getCallCount++
|
||||
freshApp := testApp.DeepCopy()
|
||||
if getCallCount == 1 {
|
||||
// First Get (for initialization) returns original version
|
||||
freshApp.ResourceVersion = "1"
|
||||
} else {
|
||||
// Subsequent Gets (during retry) return fresh version
|
||||
freshApp.ResourceVersion = "2"
|
||||
}
|
||||
return true, freshApp, nil
|
||||
})
|
||||
|
||||
// Mock Update to return conflict on first call, success on second
|
||||
fakeAppCs.AddReactor("update", "applications", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
updateCallCount++
|
||||
updateAction := action.(kubetesting.UpdateAction)
|
||||
app := updateAction.GetObject().(*v1alpha1.Application)
|
||||
|
||||
// First call (with original resource version): return conflict
|
||||
if app.ResourceVersion == "1" {
|
||||
return true, nil, apierrors.NewConflict(
|
||||
schema.GroupResource{Group: "argoproj.io", Resource: "applications"},
|
||||
app.Name,
|
||||
stderrors.New("the object has been modified"),
|
||||
)
|
||||
}
|
||||
|
||||
// Second call (with refreshed resource version from Get): return success
|
||||
updatedApp := app.DeepCopy()
|
||||
return true, updatedApp, nil
|
||||
})
|
||||
|
||||
// Attempt to terminate the operation
|
||||
_, err := appServer.TerminateOperation(ctx, &application.OperationTerminateRequest{
|
||||
Name: ptr.To(testApp.Name),
|
||||
})
|
||||
|
||||
// Should succeed after retrying with the fresh app
|
||||
require.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, updateCallCount, 2, "Update should be called at least twice (once with conflict, once with success)")
|
||||
}
|
||||
|
||||
@@ -8,7 +8,8 @@ sonar.projectVersion=1.0
|
||||
# Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
|
||||
sonar.sources=.
|
||||
|
||||
sonar.exclusions=docs/**
|
||||
# Exclude docs and testdata with kustomization files (Sonar IaC parser fails on empty/edge-case YAML)
|
||||
sonar.exclusions=docs/**,**/testdata/**
|
||||
|
||||
# Encoding of the source code. Default is default system encoding
|
||||
sonar.sourceEncoding=UTF-8
|
||||
@@ -24,5 +25,5 @@ sonar.go.exclusions=**/vendor/**,**/*.pb.go,**/*_test.go,**/*.pb.gw.go,**/mocks/
|
||||
# Exclude following set of patterns from duplication detection
|
||||
sonar.cpd.exclusions=**/*.pb.go,**/*.g.cs,**/*.gw.go,**/mocks/*,docs/**
|
||||
|
||||
# Exclude test manifests from analysis
|
||||
sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**
|
||||
# Exclude test manifests from analysis (avoids Sonar IaC parser errors on empty/edge-case kustomization files)
|
||||
sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**,util/app/discovery/testdata/**,reposerver/repository/testdata/**
|
||||
|
||||
@@ -11,5 +11,5 @@ fcgiwrap: sudo sh -c "test $ARGOCD_E2E_TEST = true && (fcgiwrap -s unix:/var/run
|
||||
nginx: sudo sh -c "test $ARGOCD_E2E_TEST = true && nginx -g 'daemon off;' -c $(pwd)/test/fixture/testrepos/nginx.conf"
|
||||
helm-registry: sudo sh -c "OTEL_TRACES_EXPORTER=none REGISTRY_LOG_LEVEL=info registry serve /etc/docker/registry/config.yml"
|
||||
dev-mounter: test "$ARGOCD_E2E_TEST" != "true" && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS=${ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS:-true} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications go run ./cmd/main.go --loglevel debug"
|
||||
|
||||
@@ -19,7 +19,10 @@ func TestBackupExportImport(t *testing.T) {
|
||||
var exportRawOutput string
|
||||
ctx := Given(t)
|
||||
// Create application in argocd namespace
|
||||
appctx := appfixture.GivenWithSameState(t)
|
||||
appctx := appfixture.GivenWithSameState(ctx)
|
||||
|
||||
var appTestNamespace Application
|
||||
var appOtherNamespace Application
|
||||
|
||||
// Create application in test namespace
|
||||
appctx.
|
||||
@@ -29,8 +32,9 @@ func TestBackupExportImport(t *testing.T) {
|
||||
CreateApp().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, "exported-app1", app.Name)
|
||||
assert.Equal(t, fixture.TestNamespace(), app.Namespace)
|
||||
assert.Equal(t, appctx.AppName(), app.Name)
|
||||
assert.Equal(t, appctx.AppNamespace(), app.Namespace)
|
||||
appTestNamespace = *app
|
||||
})
|
||||
|
||||
// Create app in other namespace
|
||||
@@ -42,8 +46,9 @@ func TestBackupExportImport(t *testing.T) {
|
||||
CreateApp().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, "exported-app-other-namespace", app.Name)
|
||||
assert.Equal(t, fixture.AppNamespace(), app.Namespace)
|
||||
assert.Equal(t, appctx.AppName(), app.Name)
|
||||
assert.Equal(t, appctx.AppNamespace(), app.Namespace)
|
||||
appOtherNamespace = *app
|
||||
})
|
||||
|
||||
ctx.
|
||||
@@ -57,8 +62,8 @@ func TestBackupExportImport(t *testing.T) {
|
||||
AndExportedResources(func(exportResources *ExportedResources, err error) {
|
||||
require.NoError(t, err, "export format not valid")
|
||||
assert.True(t, exportResources.HasResource(kube.NewResourceKey("", "ConfigMap", "", "argocd-cm")), "argocd-cm not found in export")
|
||||
assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, "", "exported-app1")), "test namespace application not in export")
|
||||
assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, fixture.AppNamespace(), "exported-app-other-namespace")), "app namespace application not in export")
|
||||
assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, "", appTestNamespace.GetName())), "test namespace application not in export")
|
||||
assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, appOtherNamespace.GetNamespace(), appOtherNamespace.GetName())), "app namespace application not in export")
|
||||
})
|
||||
|
||||
// Test import - clean state
|
||||
@@ -70,9 +75,9 @@ func TestBackupExportImport(t *testing.T) {
|
||||
Then().
|
||||
AndCLIOutput(func(_ string, err error) {
|
||||
require.NoError(t, err, "import finished with error")
|
||||
_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.TestNamespace()).Get(t.Context(), "exported-app1", metav1.GetOptions{})
|
||||
_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(appTestNamespace.GetNamespace()).Get(t.Context(), appTestNamespace.GetName(), metav1.GetOptions{})
|
||||
require.NoError(t, err, "failed getting test namespace application after import")
|
||||
_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.AppNamespace()).Get(t.Context(), "exported-app-other-namespace", metav1.GetOptions{})
|
||||
_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(appOtherNamespace.GetNamespace()).Get(t.Context(), appOtherNamespace.GetName(), metav1.GetOptions{})
|
||||
require.NoError(t, err, "failed getting app namespace application after import")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -15,8 +15,8 @@ import (
|
||||
)
|
||||
|
||||
func TestNSAutoSyncSelfHealDisabled(t *testing.T) {
|
||||
Given(t).
|
||||
SetTrackingMethod("annotation").
|
||||
ctx := Given(t)
|
||||
ctx.SetTrackingMethod("annotation").
|
||||
Path(guestbookPath).
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
// TODO: There is a bug with annotation tracking method that prevents
|
||||
@@ -37,7 +37,7 @@ func TestNSAutoSyncSelfHealDisabled(t *testing.T) {
|
||||
// app should not be auto-synced if k8s change detected
|
||||
When().
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
|
||||
"guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
|
||||
}).
|
||||
Then().
|
||||
@@ -45,8 +45,8 @@ func TestNSAutoSyncSelfHealDisabled(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNSAutoSyncSelfHealEnabled(t *testing.T) {
|
||||
Given(t).
|
||||
SetTrackingMethod("annotation").
|
||||
ctx := Given(t)
|
||||
ctx.SetTrackingMethod("annotation").
|
||||
Path(guestbookPath).
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
When().
|
||||
@@ -63,7 +63,7 @@ func TestNSAutoSyncSelfHealEnabled(t *testing.T) {
|
||||
When().
|
||||
// app should be auto-synced once k8s change detected
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
|
||||
"guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
|
||||
}).
|
||||
Refresh(RefreshTypeNormal).
|
||||
|
||||
@@ -18,8 +18,8 @@ import (
|
||||
)
|
||||
|
||||
func TestAutoSyncSelfHealDisabled(t *testing.T) {
|
||||
Given(t).
|
||||
Path(guestbookPath).
|
||||
ctx := Given(t)
|
||||
ctx.Path(guestbookPath).
|
||||
When().
|
||||
// app should be auto-synced once created
|
||||
CreateFromFile(func(app *Application) {
|
||||
@@ -36,7 +36,7 @@ func TestAutoSyncSelfHealDisabled(t *testing.T) {
|
||||
// app should not be auto-synced if k8s change detected
|
||||
When().
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
|
||||
"guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
|
||||
}).
|
||||
Refresh(RefreshTypeNormal).
|
||||
@@ -45,8 +45,8 @@ func TestAutoSyncSelfHealDisabled(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAutoSyncSelfHealEnabled(t *testing.T) {
|
||||
Given(t).
|
||||
Path(guestbookPath).
|
||||
ctx := Given(t)
|
||||
ctx.Path(guestbookPath).
|
||||
When().
|
||||
// app should be auto-synced once created
|
||||
CreateFromFile(func(app *Application) {
|
||||
@@ -61,7 +61,7 @@ func TestAutoSyncSelfHealEnabled(t *testing.T) {
|
||||
When().
|
||||
// app should be auto-synced once k8s change detected
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
|
||||
"guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
|
||||
}).
|
||||
Refresh(RefreshTypeNormal).
|
||||
|
||||
@@ -48,8 +48,8 @@ func TestNamespacedGetLogsAllow(_ *testing.T) {
|
||||
func TestNamespacedGetLogsDeny(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "OPENSHIFT")
|
||||
|
||||
accountFixture.Given(t).
|
||||
Name("test").
|
||||
accountCtx := accountFixture.Given(t)
|
||||
accountCtx.Name("test").
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
@@ -76,7 +76,7 @@ func TestNamespacedGetLogsDeny(t *testing.T) {
|
||||
},
|
||||
}, "app-creator")
|
||||
|
||||
ctx := GivenWithSameState(t)
|
||||
ctx := GivenWithSameState(accountCtx)
|
||||
ctx.SetAppNamespace(fixture.ArgoCDAppNamespace)
|
||||
ctx.
|
||||
Path("guestbook-logs").
|
||||
@@ -95,8 +95,8 @@ func TestNamespacedGetLogsDeny(t *testing.T) {
|
||||
func TestNamespacedGetLogsAllowNS(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "OPENSHIFT")
|
||||
|
||||
accountFixture.Given(t).
|
||||
Name("test").
|
||||
accountCtx := accountFixture.Given(t)
|
||||
accountCtx.Name("test").
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
@@ -128,7 +128,7 @@ func TestNamespacedGetLogsAllowNS(t *testing.T) {
|
||||
},
|
||||
}, "app-creator")
|
||||
|
||||
ctx := GivenWithSameState(t)
|
||||
ctx := GivenWithSameState(accountCtx)
|
||||
ctx.SetAppNamespace(fixture.AppNamespace())
|
||||
ctx.
|
||||
Path("guestbook-logs").
|
||||
@@ -220,11 +220,11 @@ func TestNamespacedAppCreation(t *testing.T) {
|
||||
Then().
|
||||
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, fixture.Name(), app.Name)
|
||||
assert.Equal(t, ctx.GetName(), app.Name)
|
||||
assert.Equal(t, fixture.AppNamespace(), app.Namespace)
|
||||
assert.Equal(t, fixture.RepoURL(fixture.RepoURLTypeFile), app.Spec.GetSource().RepoURL)
|
||||
assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
|
||||
assert.Equal(t, fixture.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(NamespacedEvent(fixture.AppNamespace(), EventReasonResourceCreated, "create")).
|
||||
@@ -272,7 +272,7 @@ func TestNamespacedAppCreationWithoutForceUpdate(t *testing.T) {
|
||||
assert.Equal(t, fixture.AppNamespace(), app.Namespace)
|
||||
assert.Equal(t, fixture.RepoURL(fixture.RepoURLTypeFile), app.Spec.GetSource().RepoURL)
|
||||
assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
|
||||
assert.Equal(t, fixture.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, "in-cluster", app.Spec.Destination.Name)
|
||||
}).
|
||||
Expect(NamespacedEvent(fixture.AppNamespace(), EventReasonResourceCreated, "create")).
|
||||
@@ -314,7 +314,8 @@ func TestNamespacedDeleteAppResource(t *testing.T) {
|
||||
// demonstrate that we cannot use a standard sync when an immutable field is changed, we must use "force"
|
||||
func TestNamespacedImmutableChange(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "OPENSHIFT")
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
Path("secrets").
|
||||
SetTrackingMethod("annotation").
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
@@ -338,7 +339,7 @@ func TestNamespacedImmutableChange(t *testing.T) {
|
||||
Expect(ResourceResultMatches(ResourceResult{
|
||||
Kind: "Secret",
|
||||
Version: "v1",
|
||||
Namespace: fixture.DeploymentNamespace(),
|
||||
Namespace: ctx.DeploymentNamespace(),
|
||||
Name: "test-secret",
|
||||
SyncPhase: "Sync",
|
||||
Status: "SyncFailed",
|
||||
@@ -394,16 +395,17 @@ func TestNamespacedAppDeletion(t *testing.T) {
|
||||
|
||||
func TestNamespacedAppLabels(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
label := "id=" + ctx.ShortID()
|
||||
ctx.
|
||||
Path("config-map").
|
||||
SetTrackingMethod("annotation").
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
When().
|
||||
CreateApp("-l", "foo=bar").
|
||||
CreateApp("-l", label).
|
||||
Then().
|
||||
And(func(_ *Application) {
|
||||
assert.Contains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list")), ctx.AppQualifiedName())
|
||||
assert.Contains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list", "-l", "foo=bar")), ctx.AppQualifiedName())
|
||||
assert.Contains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list", "-l", label)), ctx.AppQualifiedName())
|
||||
assert.NotContains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list", "-l", "foo=rubbish")), ctx.AppQualifiedName())
|
||||
}).
|
||||
Given().
|
||||
@@ -418,11 +420,12 @@ func TestNamespacedAppLabels(t *testing.T) {
|
||||
// check we can update the app and it is then sync'd
|
||||
Given().
|
||||
When().
|
||||
Sync("-l", "foo=bar")
|
||||
Sync("-l", label)
|
||||
}
|
||||
|
||||
func TestNamespacedTrackAppStateAndSyncApp(t *testing.T) {
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
Path(guestbookPath).
|
||||
SetTrackingMethod("annotation").
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
@@ -433,8 +436,8 @@ func TestNamespacedTrackAppStateAndSyncApp(t *testing.T) {
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced ", fixture.DeploymentNamespace()))).
|
||||
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced", fixture.DeploymentNamespace()))).
|
||||
Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced ", ctx.DeploymentNamespace()))).
|
||||
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced", ctx.DeploymentNamespace()))).
|
||||
Expect(NamespacedEvent(fixture.AppNamespace(), EventReasonResourceUpdated, "sync")).
|
||||
And(func(app *Application) {
|
||||
assert.NotNil(t, app.Status.OperationState.SyncResult)
|
||||
@@ -604,12 +607,12 @@ func TestNamespacedAppWithSecrets(t *testing.T) {
|
||||
_, err = fixture.RunCli("app", "patch-resource", ctx.AppQualifiedName(), "--resource-name", "test-secret",
|
||||
"--kind", "Secret", "--patch", `{"op": "add", "path": "/data", "value": "hello"}'`,
|
||||
"--patch-type", "application/json-patch+json")
|
||||
require.ErrorContains(t, err, fmt.Sprintf("failed to patch Secret %s/test-secret", fixture.DeploymentNamespace()))
|
||||
require.ErrorContains(t, err, fmt.Sprintf("failed to patch Secret %s/test-secret", ctx.DeploymentNamespace()))
|
||||
assert.NotContains(t, err.Error(), "username")
|
||||
assert.NotContains(t, err.Error(), "password")
|
||||
|
||||
// patch secret and make sure app is out of sync and diff detects the change
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Secrets(fixture.DeploymentNamespace()).Patch(t.Context(),
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Secrets(ctx.DeploymentNamespace()).Patch(t.Context(),
|
||||
"test-secret", types.JSONPatchType, []byte(`[
|
||||
{"op": "remove", "path": "/data/username"},
|
||||
{"op": "add", "path": "/stringData", "value": {"password": "foo"}}
|
||||
@@ -673,7 +676,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
And(func(_ *Application) {
|
||||
// Patch deployment
|
||||
_, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
|
||||
_, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
|
||||
"guestbook-ui", types.JSONPatchType, []byte(`[{ "op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "test" }]`), metav1.PatchOptions{})
|
||||
require.NoError(t, err)
|
||||
}).
|
||||
@@ -684,7 +687,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
|
||||
And(func(_ *Application) {
|
||||
diffOutput, err := fixture.RunCli("app", "diff", ctx.AppQualifiedName(), "--local-repo-root", ".", "--local", "testdata/guestbook")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", fixture.DeploymentNamespace()))
|
||||
assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", ctx.DeploymentNamespace()))
|
||||
}).
|
||||
Given().
|
||||
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {
|
||||
@@ -713,7 +716,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
|
||||
}]`).
|
||||
Sync().
|
||||
And(func() {
|
||||
output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", fixture.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
|
||||
output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", ctx.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "serverside-applied")
|
||||
}).
|
||||
@@ -740,12 +743,12 @@ func TestNamespacedResourceDiffing(t *testing.T) {
|
||||
"value": { "syncOptions": ["RespectIgnoreDifferences=true"] }
|
||||
}]`).
|
||||
And(func() {
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int32(3), *deployment.Spec.RevisionHistoryLimit)
|
||||
}).
|
||||
And(func() {
|
||||
output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", fixture.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
|
||||
output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", ctx.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "serverside-applied")
|
||||
}).
|
||||
@@ -754,13 +757,13 @@ func TestNamespacedResourceDiffing(t *testing.T) {
|
||||
Then().
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
And(func(_ *Application) {
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int32(1), *deployment.Spec.RevisionHistoryLimit)
|
||||
}).
|
||||
When().Sync().Then().Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
And(func(_ *Application) {
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int32(1), *deployment.Spec.RevisionHistoryLimit)
|
||||
})
|
||||
@@ -782,7 +785,7 @@ func TestNamespacedKnownTypesInCRDDiffing(t *testing.T) {
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
When().
|
||||
And(func() {
|
||||
dummyResIf := fixture.DynamicClientset.Resource(dummiesGVR).Namespace(fixture.DeploymentNamespace())
|
||||
dummyResIf := fixture.DynamicClientset.Resource(dummiesGVR).Namespace(ctx.DeploymentNamespace())
|
||||
patchData := []byte(`{"spec":{"cpu": "2"}}`)
|
||||
errors.NewHandler(t).FailOnErr(dummyResIf.Patch(t.Context(), "dummy-crd-instance", types.MergePatchType, patchData, metav1.PatchOptions{}))
|
||||
}).Refresh(RefreshTypeNormal).
|
||||
@@ -869,7 +872,7 @@ func TestNamespacedResourceAction(t *testing.T) {
|
||||
Group: ptr.To("apps"),
|
||||
Kind: ptr.To("Deployment"),
|
||||
Version: ptr.To("v1"),
|
||||
Namespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
Namespace: ptr.To(ctx.DeploymentNamespace()),
|
||||
ResourceName: ptr.To("guestbook-ui"),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -880,14 +883,14 @@ func TestNamespacedResourceAction(t *testing.T) {
|
||||
Group: ptr.To("apps"),
|
||||
Kind: ptr.To("Deployment"),
|
||||
Version: ptr.To("v1"),
|
||||
Namespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
Namespace: ptr.To(ctx.DeploymentNamespace()),
|
||||
ResourceName: ptr.To("guestbook-ui"),
|
||||
Action: ptr.To("sample"),
|
||||
AppNamespace: ptr.To(fixture.AppNamespace()),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "test", deployment.Labels["sample"])
|
||||
@@ -1023,7 +1026,7 @@ func TestNamespacedSyncAsync(t *testing.T) {
|
||||
}
|
||||
|
||||
// assertResourceActions verifies if view/modify resource actions are successful/failing for given application
|
||||
func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
func assertNSResourceActions(t *testing.T, appName string, deploymentNamespace string, successful bool) {
|
||||
t.Helper()
|
||||
assertError := func(err error, message string) {
|
||||
if successful {
|
||||
@@ -1036,7 +1039,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
closer, cdClient := fixture.ArgoCDClientset.NewApplicationClientOrDie()
|
||||
defer utilio.Close(closer)
|
||||
|
||||
deploymentResource, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
deploymentResource, err := fixture.KubeClientset.AppsV1().Deployments(deploymentNamespace).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
|
||||
logs, err := cdClient.PodLogs(t.Context(), &applicationpkg.ApplicationPodLogsQuery{
|
||||
@@ -1044,7 +1047,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
Kind: ptr.To("Deployment"),
|
||||
Name: &appName,
|
||||
AppNamespace: ptr.To(fixture.AppNamespace()),
|
||||
Namespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
Namespace: ptr.To(deploymentNamespace),
|
||||
Container: ptr.To(""),
|
||||
SinceSeconds: ptr.To(int64(0)),
|
||||
TailLines: ptr.To(int64(0)),
|
||||
@@ -1060,7 +1063,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
Name: &appName,
|
||||
AppNamespace: ptr.To(fixture.AppNamespace()),
|
||||
ResourceName: ptr.To("guestbook-ui"),
|
||||
ResourceNamespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
ResourceNamespace: ptr.To(deploymentNamespace),
|
||||
ResourceUID: ptr.To(string(deploymentResource.UID)),
|
||||
})
|
||||
assertError(err, fmt.Sprintf("%s not found as part of application %s", "guestbook-ui", appName))
|
||||
@@ -1069,7 +1072,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
Name: &appName,
|
||||
AppNamespace: ptr.To(fixture.AppNamespace()),
|
||||
ResourceName: ptr.To("guestbook-ui"),
|
||||
Namespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
Namespace: ptr.To(deploymentNamespace),
|
||||
Version: ptr.To("v1"),
|
||||
Group: ptr.To("apps"),
|
||||
Kind: ptr.To("Deployment"),
|
||||
@@ -1080,7 +1083,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
Name: &appName,
|
||||
AppNamespace: ptr.To(fixture.AppNamespace()),
|
||||
ResourceName: ptr.To("guestbook-ui"),
|
||||
Namespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
Namespace: ptr.To(deploymentNamespace),
|
||||
Version: ptr.To("v1"),
|
||||
Group: ptr.To("apps"),
|
||||
Kind: ptr.To("Deployment"),
|
||||
@@ -1092,7 +1095,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
Name: &appName,
|
||||
AppNamespace: ptr.To(fixture.AppNamespace()),
|
||||
ResourceName: ptr.To("guestbook-ui"),
|
||||
Namespace: ptr.To(fixture.DeploymentNamespace()),
|
||||
Namespace: ptr.To(deploymentNamespace),
|
||||
Version: ptr.To("v1"),
|
||||
Group: ptr.To("apps"),
|
||||
Kind: ptr.To("Deployment"),
|
||||
@@ -1102,22 +1105,20 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
|
||||
|
||||
func TestNamespacedPermissions(t *testing.T) {
|
||||
appCtx := Given(t)
|
||||
projName := "argo-project"
|
||||
projActions := projectFixture.
|
||||
Given(t).
|
||||
Name(projName).
|
||||
projCtx := projectFixture.GivenWithSameState(appCtx)
|
||||
projActions := projCtx.
|
||||
SourceNamespaces([]string{fixture.AppNamespace()}).
|
||||
When().
|
||||
Create()
|
||||
|
||||
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'argo-project'", fixture.RepoURL(fixture.RepoURLTypeFile))
|
||||
destinationError := fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project 'argo-project'", KubernetesInternalAPIServerAddr, fixture.DeploymentNamespace())
|
||||
sourceError := fmt.Sprintf("application repo %s is not permitted in project '%s'", fixture.RepoURL(fixture.RepoURLTypeFile), projCtx.GetName())
|
||||
destinationError := fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project '%s'", KubernetesInternalAPIServerAddr, appCtx.DeploymentNamespace(), projCtx.GetName())
|
||||
|
||||
appCtx.
|
||||
Path("guestbook-logs").
|
||||
SetTrackingMethod("annotation").
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
Project(projName).
|
||||
Project(projCtx.GetName()).
|
||||
When().
|
||||
IgnoreErrors().
|
||||
// ensure app is not created if project permissions are missing
|
||||
@@ -1138,7 +1139,7 @@ func TestNamespacedPermissions(t *testing.T) {
|
||||
Then().
|
||||
// make sure application resource actions are successful
|
||||
And(func(app *Application) {
|
||||
assertNSResourceActions(t, app.Name, true)
|
||||
assertNSResourceActions(t, app.Name, appCtx.DeploymentNamespace(), true)
|
||||
}).
|
||||
When().
|
||||
// remove projet permissions and "refresh" app
|
||||
@@ -1175,29 +1176,27 @@ func TestNamespacedPermissions(t *testing.T) {
|
||||
Then().
|
||||
// make sure application resource actions are failing
|
||||
And(func(app *Application) {
|
||||
assertNSResourceActions(t, app.Name, false)
|
||||
assertNSResourceActions(t, app.Name, appCtx.DeploymentNamespace(), false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNamespacedPermissionWithScopedRepo(t *testing.T) {
|
||||
projName := "argo-project"
|
||||
fixture.EnsureCleanState(t)
|
||||
projectFixture.
|
||||
Given(t).
|
||||
Name(projName).
|
||||
ctx := Given(t)
|
||||
projCtx := projectFixture.GivenWithSameState(ctx)
|
||||
projCtx.
|
||||
SourceNamespaces([]string{fixture.AppNamespace()}).
|
||||
Destination("*,*").
|
||||
When().
|
||||
Create()
|
||||
|
||||
repoFixture.GivenWithSameState(t).
|
||||
repoFixture.GivenWithSameState(ctx).
|
||||
When().
|
||||
Path(fixture.RepoURL(fixture.RepoURLTypeFile)).
|
||||
Project(projName).
|
||||
Project(projCtx.GetName()).
|
||||
Create()
|
||||
|
||||
GivenWithSameState(t).
|
||||
Project(projName).
|
||||
GivenWithSameState(ctx).
|
||||
Project(projCtx.GetName()).
|
||||
RepoURLType(fixture.RepoURLTypeFile).
|
||||
Path("two-nice-pods").
|
||||
SetTrackingMethod("annotation").
|
||||
@@ -1221,22 +1220,19 @@ func TestNamespacedPermissionWithScopedRepo(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNamespacedPermissionDeniedWithScopedRepo(t *testing.T) {
|
||||
projName := "argo-project"
|
||||
projectFixture.
|
||||
Given(t).
|
||||
Name(projName).
|
||||
Destination("*,*").
|
||||
ctx := projectFixture.Given(t)
|
||||
ctx.Destination("*,*").
|
||||
SourceNamespaces([]string{fixture.AppNamespace()}).
|
||||
When().
|
||||
Create()
|
||||
|
||||
repoFixture.GivenWithSameState(t).
|
||||
repoFixture.GivenWithSameState(ctx).
|
||||
When().
|
||||
Path(fixture.RepoURL(fixture.RepoURLTypeFile)).
|
||||
Create()
|
||||
|
||||
GivenWithSameState(t).
|
||||
Project(projName).
|
||||
GivenWithSameState(ctx).
|
||||
Project(ctx.GetName()).
|
||||
RepoURLType(fixture.RepoURLTypeFile).
|
||||
SetTrackingMethod("annotation").
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
@@ -1409,7 +1405,8 @@ func TestNamespacedRevisionHistoryLimit(t *testing.T) {
|
||||
|
||||
func TestNamespacedOrphanedResource(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "OPENSHIFT")
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
ProjectSpec(AppProjectSpec{
|
||||
SourceRepos: []string{"*"},
|
||||
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
|
||||
@@ -1427,7 +1424,7 @@ func TestNamespacedOrphanedResource(t *testing.T) {
|
||||
Expect(NoConditions()).
|
||||
When().
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(fixture.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(ctx.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "orphaned-configmap",
|
||||
},
|
||||
@@ -1513,7 +1510,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sample-ingress",
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:networking/Ingress:%s/sample-ingress", fixture.AppNamespace(), ctx.AppName(), fixture.DeploymentNamespace()),
|
||||
common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:networking/Ingress:%s/sample-ingress", fixture.AppNamespace(), ctx.AppName(), ctx.DeploymentNamespace()),
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
@@ -1544,7 +1541,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "guestbook-ui",
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:Service:%s/guesbook-ui", fixture.TestNamespace(), ctx.AppQualifiedName(), fixture.DeploymentNamespace()),
|
||||
common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:Service:%s/guesbook-ui", fixture.TestNamespace(), ctx.AppQualifiedName(), ctx.DeploymentNamespace()),
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
@@ -1560,7 +1557,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
|
||||
|
||||
ctx.ProjectSpec(AppProjectSpec{
|
||||
SourceRepos: []string{"*"},
|
||||
Destinations: []ApplicationDestination{{Namespace: fixture.DeploymentNamespace(), Server: "*"}},
|
||||
Destinations: []ApplicationDestination{{Namespace: ctx.DeploymentNamespace(), Server: "*"}},
|
||||
SourceNamespaces: []string{fixture.AppNamespace()},
|
||||
NamespaceResourceBlacklist: []metav1.GroupKind{
|
||||
{Group: "", Kind: "Service"},
|
||||
@@ -1568,7 +1565,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
|
||||
}).
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.NetworkingV1().Ingresses(fixture.TestNamespace()).Create(t.Context(), ingress, metav1.CreateOptions{}))
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(fixture.DeploymentNamespace()).Create(t.Context(), svc, metav1.CreateOptions{}))
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(ctx.DeploymentNamespace()).Create(t.Context(), svc, metav1.CreateOptions{}))
|
||||
}).
|
||||
Path(guestbookPath).
|
||||
When().
|
||||
@@ -1594,7 +1591,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
|
||||
|
||||
// Make sure prohibited resources are not deleted during application deletion
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.NetworkingV1().Ingresses(fixture.TestNamespace()).Get(t.Context(), "sample-ingress", metav1.GetOptions{}))
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{}))
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{}))
|
||||
}
|
||||
|
||||
func TestNamespacedSyncWithInfos(t *testing.T) {
|
||||
@@ -1694,7 +1691,8 @@ func TestNamespacedCreateAppWithNoNameSpaceWhenRequired2(t *testing.T) {
|
||||
|
||||
func TestNamespacedListResource(t *testing.T) {
|
||||
fixture.SkipOnEnv(t, "OPENSHIFT")
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
SetTrackingMethod("annotation").
|
||||
ProjectSpec(AppProjectSpec{
|
||||
@@ -1712,7 +1710,7 @@ func TestNamespacedListResource(t *testing.T) {
|
||||
Expect(NoConditions()).
|
||||
When().
|
||||
And(func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(fixture.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
|
||||
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(ctx.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "orphaned-configmap",
|
||||
},
|
||||
@@ -1970,7 +1968,7 @@ metadata:
|
||||
labels:
|
||||
test: "true"
|
||||
annotations:
|
||||
something: "whatevs"
|
||||
something: "whatevs"
|
||||
`
|
||||
s := fmt.Sprintf(existingNs, updatedNamespace)
|
||||
|
||||
@@ -2087,7 +2085,8 @@ func TestNamespacedFailedSyncWithRetry(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNamespacedCreateDisableValidation(t *testing.T) {
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
SetAppNamespace(fixture.AppNamespace()).
|
||||
SetTrackingMethod("annotation").
|
||||
Path("baddir").
|
||||
@@ -2096,7 +2095,7 @@ func TestNamespacedCreateDisableValidation(t *testing.T) {
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
_, err := fixture.RunCli("app", "create", app.QualifiedName(), "--upsert", "--validate=false", "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
|
||||
"--path", "baddir2", "--project", app.Spec.Project, "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
|
||||
"--path", "baddir2", "--project", app.Spec.Project, "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", ctx.DeploymentNamespace())
|
||||
require.NoError(t, err)
|
||||
}).
|
||||
When().
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -28,12 +28,12 @@ func TestMultiSourceAppCreation(t *testing.T) {
|
||||
CreateMultiSourceAppFromFile().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, Name(), app.Name)
|
||||
assert.Equal(t, ctx.GetName(), app.Name)
|
||||
for i, source := range app.Spec.GetSources() {
|
||||
assert.Equal(t, sources[i].RepoURL, source.RepoURL)
|
||||
assert.Equal(t, sources[i].Path, source.Path)
|
||||
}
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(Event(EventReasonResourceCreated, "create")).
|
||||
@@ -41,7 +41,7 @@ func TestMultiSourceAppCreation(t *testing.T) {
|
||||
// app should be listed
|
||||
output, err := RunCli("app", "list")
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, Name())
|
||||
assert.Contains(t, output, ctx.GetName())
|
||||
}).
|
||||
Expect(Success("")).
|
||||
Given().Timeout(60).
|
||||
@@ -83,12 +83,12 @@ func TestMultiSourceAppWithHelmExternalValueFiles(t *testing.T) {
|
||||
CreateMultiSourceAppFromFile().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, Name(), app.Name)
|
||||
assert.Equal(t, ctx.GetName(), app.Name)
|
||||
for i, source := range app.Spec.GetSources() {
|
||||
assert.Equal(t, sources[i].RepoURL, source.RepoURL)
|
||||
assert.Equal(t, sources[i].Path, source.Path)
|
||||
}
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(Event(EventReasonResourceCreated, "create")).
|
||||
@@ -96,7 +96,7 @@ func TestMultiSourceAppWithHelmExternalValueFiles(t *testing.T) {
|
||||
// app should be listed
|
||||
output, err := RunCli("app", "list")
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, Name())
|
||||
assert.Contains(t, output, ctx.GetName())
|
||||
}).
|
||||
Expect(Success("")).
|
||||
Given().Timeout(60).
|
||||
@@ -111,7 +111,7 @@ func TestMultiSourceAppWithHelmExternalValueFiles(t *testing.T) {
|
||||
assert.Equal(t, SyncStatusCodeSynced, statusByName["guestbook-ui"])
|
||||
|
||||
// Confirm that the deployment has 3 replicas.
|
||||
output, err := Run("", "kubectl", "get", "deployment", "guestbook-ui", "-n", DeploymentNamespace(), "-o", "jsonpath={.spec.replicas}")
|
||||
output, err := Run("", "kubectl", "get", "deployment", "guestbook-ui", "-n", ctx.DeploymentNamespace(), "-o", "jsonpath={.spec.replicas}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "3", output, "Expected 3 replicas for the helm-guestbook deployment")
|
||||
})
|
||||
@@ -135,12 +135,12 @@ func TestMultiSourceAppWithSourceOverride(t *testing.T) {
|
||||
CreateMultiSourceAppFromFile().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, Name(), app.Name)
|
||||
assert.Equal(t, ctx.GetName(), app.Name)
|
||||
for i, source := range app.Spec.GetSources() {
|
||||
assert.Equal(t, sources[i].RepoURL, source.RepoURL)
|
||||
assert.Equal(t, sources[i].Path, source.Path)
|
||||
}
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(Event(EventReasonResourceCreated, "create")).
|
||||
@@ -148,7 +148,7 @@ func TestMultiSourceAppWithSourceOverride(t *testing.T) {
|
||||
// app should be listed
|
||||
output, err := RunCli("app", "list")
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, Name())
|
||||
assert.Contains(t, output, ctx.GetName())
|
||||
}).
|
||||
Expect(Success("")).
|
||||
Given().Timeout(60).
|
||||
@@ -166,7 +166,7 @@ func TestMultiSourceAppWithSourceOverride(t *testing.T) {
|
||||
assert.Equal(t, SyncStatusCodeSynced, statusByName["guestbook-ui"])
|
||||
|
||||
// check if label was added to the pod to make sure resource was taken from the later source
|
||||
output, err := Run("", "kubectl", "describe", "pods", "pod-1", "-n", DeploymentNamespace())
|
||||
output, err := Run("", "kubectl", "describe", "pods", "pod-1", "-n", ctx.DeploymentNamespace())
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "foo=bar")
|
||||
})
|
||||
@@ -189,19 +189,19 @@ func TestMultiSourceAppWithSourceName(t *testing.T) {
|
||||
CreateMultiSourceAppFromFile().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, Name(), app.Name)
|
||||
assert.Equal(t, ctx.GetName(), app.Name)
|
||||
for i, source := range app.Spec.GetSources() {
|
||||
assert.Equal(t, sources[i].RepoURL, source.RepoURL)
|
||||
assert.Equal(t, sources[i].Path, source.Path)
|
||||
assert.Equal(t, sources[i].Name, source.Name)
|
||||
}
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(Event(EventReasonResourceCreated, "create")).
|
||||
And(func(_ *Application) {
|
||||
// we remove the first source
|
||||
output, err := RunCli("app", "remove-source", Name(), "--source-name", sources[0].Name)
|
||||
output, err := RunCli("app", "remove-source", ctx.GetName(), "--source-name", sources[0].Name)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "updated successfully")
|
||||
}).
|
||||
@@ -209,7 +209,7 @@ func TestMultiSourceAppWithSourceName(t *testing.T) {
|
||||
And(func(app *Application) {
|
||||
assert.Len(t, app.Spec.GetSources(), 1)
|
||||
// we add a source
|
||||
output, err := RunCli("app", "add-source", Name(), "--source-name", sources[0].Name, "--repo", RepoURL(RepoURLTypeFile), "--path", guestbookPath)
|
||||
output, err := RunCli("app", "add-source", ctx.GetName(), "--source-name", sources[0].Name, "--repo", RepoURL(RepoURLTypeFile), "--path", guestbookPath)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "updated successfully")
|
||||
}).
|
||||
@@ -251,18 +251,18 @@ func TestMultiSourceAppSetWithSourceName(t *testing.T) {
|
||||
CreateMultiSourceAppFromFile().
|
||||
Then().
|
||||
And(func(app *Application) {
|
||||
assert.Equal(t, Name(), app.Name)
|
||||
assert.Equal(t, ctx.GetName(), app.Name)
|
||||
for i, source := range app.Spec.GetSources() {
|
||||
assert.Equal(t, sources[i].RepoURL, source.RepoURL)
|
||||
assert.Equal(t, sources[i].Path, source.Path)
|
||||
assert.Equal(t, sources[i].Name, source.Name)
|
||||
}
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(Event(EventReasonResourceCreated, "create")).
|
||||
And(func(_ *Application) {
|
||||
_, err := RunCli("app", "set", Name(), "--source-name", sources[1].Name, "--path", "deployment")
|
||||
_, err := RunCli("app", "set", ctx.GetName(), "--source-name", sources[1].Name, "--path", "deployment")
|
||||
require.NoError(t, err)
|
||||
}).
|
||||
Expect(Success("")).
|
||||
@@ -289,11 +289,11 @@ func TestMultiSourceApptErrorWhenSourceNameAndSourcePosition(t *testing.T) {
|
||||
Then().
|
||||
Expect(Event(EventReasonResourceCreated, "create")).
|
||||
And(func(_ *Application) {
|
||||
_, err := RunCli("app", "get", Name(), "--source-name", sources[1].Name, "--source-position", "1")
|
||||
_, err := RunCli("app", "get", ctx.GetName(), "--source-name", sources[1].Name, "--source-position", "1")
|
||||
assert.ErrorContains(t, err, "Only one of source-position and source-name can be specified.")
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
_, err := RunCli("app", "manifests", Name(), "--revisions", "0.0.2", "--source-names", sources[0].Name, "--revisions", "0.0.2", "--source-positions", "1")
|
||||
_, err := RunCli("app", "manifests", ctx.GetName(), "--revisions", "0.0.2", "--source-names", sources[0].Name, "--revisions", "0.0.2", "--source-positions", "1")
|
||||
assert.ErrorContains(t, err, "Only one of source-positions and source-names can be specified.")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestAppCreationInOtherNamespace(t *testing.T) {
|
||||
assert.Equal(t, AppNamespace(), app.Namespace)
|
||||
assert.Equal(t, RepoURL(RepoURLTypeFile), app.Spec.GetSource().RepoURL)
|
||||
assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(NamespacedEvent(ctx.AppNamespace(), EventReasonResourceCreated, "create")).
|
||||
|
||||
@@ -69,9 +69,6 @@ func TestSimpleGitDirectoryGenerator(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -132,7 +129,7 @@ func TestSimpleGitDirectoryGenerator(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -178,9 +175,6 @@ func TestSimpleGitDirectoryGeneratorGoTemplate(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -242,7 +236,7 @@ func TestSimpleGitDirectoryGeneratorGoTemplate(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -307,9 +301,6 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -342,7 +333,7 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
|
||||
}).
|
||||
Then().Expect(ApplicationsDoNotExist(expectedApps)).
|
||||
// verify the ApplicationSet error status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
|
||||
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
|
||||
When().
|
||||
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
|
||||
}
|
||||
@@ -409,9 +400,6 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
|
||||
IgnoreErrors().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -447,7 +435,7 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
|
||||
},
|
||||
}).Then().
|
||||
// verify the ApplicationSet error status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
|
||||
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
|
||||
Expect(ApplicationsDoNotExist(expectedApps)).
|
||||
When().
|
||||
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
|
||||
@@ -492,9 +480,6 @@ func TestSimpleGitFilesGenerator(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
|
||||
@@ -555,7 +540,7 @@ func TestSimpleGitFilesGenerator(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -621,9 +606,6 @@ func TestSimpleGitFilesGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
|
||||
@@ -655,7 +637,7 @@ func TestSimpleGitFilesGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
|
||||
},
|
||||
}).Then().Expect(ApplicationsDoNotExist(expectedApps)).
|
||||
// verify the ApplicationSet error status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
|
||||
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
|
||||
When().
|
||||
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
|
||||
}
|
||||
@@ -722,9 +704,6 @@ func TestSimpleGitFilesGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
|
||||
IgnoreErrors().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
|
||||
@@ -756,7 +735,7 @@ func TestSimpleGitFilesGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
|
||||
},
|
||||
}).Then().
|
||||
// verify the ApplicationSet error status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
|
||||
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
|
||||
Expect(ApplicationsDoNotExist(expectedApps)).
|
||||
When().
|
||||
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
|
||||
@@ -801,9 +780,6 @@ func TestSimpleGitFilesGeneratorGoTemplate(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -865,7 +841,7 @@ func TestSimpleGitFilesGeneratorGoTemplate(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -878,9 +854,6 @@ func TestSimpleGitFilesPreserveResourcesOnDeletion(t *testing.T) {
|
||||
CreateNamespace(utils.ApplicationsResourcesNamespace).
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
|
||||
@@ -938,9 +911,6 @@ func TestSimpleGitFilesPreserveResourcesOnDeletionGoTemplate(t *testing.T) {
|
||||
CreateNamespace(utils.ApplicationsResourcesNamespace).
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1032,9 +1002,6 @@ func TestGitGeneratorPrivateRepo(t *testing.T) {
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -1108,9 +1075,6 @@ func TestGitGeneratorPrivateRepoGoTemplate(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1184,9 +1148,6 @@ func TestSimpleGitGeneratorPrivateRepoWithNoRepo(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -1249,20 +1210,15 @@ func TestSimpleGitGeneratorPrivateRepoWithMatchingProject(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
ctx := Given(t)
|
||||
expectedApps := []v1alpha1.Application{
|
||||
generateExpectedApp("https-kustomize-base"),
|
||||
}
|
||||
|
||||
var expectedAppsNewNamespace []v1alpha1.Application
|
||||
|
||||
Given(t).
|
||||
HTTPSInsecureRepoURLAdded("default").
|
||||
ctx.HTTPSInsecureRepoURLAdded("default").
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -1295,7 +1251,7 @@ func TestSimpleGitGeneratorPrivateRepoWithMatchingProject(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist(expectedApps)).
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
|
||||
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
|
||||
}
|
||||
|
||||
func TestSimpleGitGeneratorPrivateRepoWithMismatchingProject(t *testing.T) {
|
||||
@@ -1336,9 +1292,6 @@ func TestSimpleGitGeneratorPrivateRepoWithMismatchingProject(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -1412,9 +1365,6 @@ func TestGitGeneratorPrivateRepoWithTemplatedProject(t *testing.T) {
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
@@ -1499,9 +1449,6 @@ func TestGitGeneratorPrivateRepoWithTemplatedProjectAndProjectScopedRepo(t *test
|
||||
When().
|
||||
// Create a GitGenerator-based ApplicationSet
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-git-generator-private",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
|
||||
|
||||
673
test/e2e/applicationset_progressive_sync_test.go
Normal file
673
test/e2e/applicationset_progressive_sync_test.go
Normal file
@@ -0,0 +1,673 @@
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
|
||||
. "github.com/argoproj/argo-cd/v3/test/e2e/fixture/applicationsets"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
TransitionTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
func TestApplicationSetProgressiveSyncStep(t *testing.T) {
|
||||
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
|
||||
t.Skip("Skipping progressive sync tests - env variable not set to enable progressive sync")
|
||||
}
|
||||
expectedDevApp := v1alpha1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1-dev",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "dev",
|
||||
},
|
||||
Finalizers: []string{
|
||||
"resources-finalizer.argocd.argoproj.io",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
Path: "guestbook",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "app1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectedStageApp := v1alpha1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app2-staging",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "staging",
|
||||
},
|
||||
Finalizers: []string{
|
||||
"resources-finalizer.argocd.argoproj.io",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
Path: "guestbook",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "app2",
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedProdApp := v1alpha1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app3-prod",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "prod",
|
||||
},
|
||||
Finalizers: []string{
|
||||
"resources-finalizer.argocd.argoproj.io",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
Path: "guestbook",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "app3",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
Given(t).
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "progressive-sync-apps",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "{{.name}}-{{.environment}}",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "{{.environment}}",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
Path: "guestbook",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "{{.name}}",
|
||||
},
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"name": "app1", "environment": "dev"}`)},
|
||||
{Raw: []byte(`{"name": "app2", "environment": "staging"}`)},
|
||||
{Raw: []byte(`{"name": "app3", "environment": "prod"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: generateStandardRolloutSyncSteps(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}).
|
||||
Then().
|
||||
And(func() {
|
||||
t.Log("ApplicationSet created ")
|
||||
}).
|
||||
Expect(ApplicationsExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp})).
|
||||
And(func() {
|
||||
t.Log("All applications exist")
|
||||
}).
|
||||
ExpectWithDuration(CheckApplicationInRightSteps("1", []string{"app1-dev"}), TransitionTimeout).
|
||||
ExpectWithDuration(CheckApplicationInRightSteps("2", []string{"app2-staging"}), time.Second*5).
|
||||
ExpectWithDuration(CheckApplicationInRightSteps("3", []string{"app3-prod"}), time.Second*5).
|
||||
// cleanup
|
||||
When().
|
||||
Delete().
|
||||
Then().
|
||||
ExpectWithDuration(ApplicationsDoNotExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp}), time.Minute)
|
||||
}
|
||||
|
||||
func TestProgressiveSyncHealthGating(t *testing.T) {
|
||||
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
|
||||
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
|
||||
}
|
||||
expectedDevApp := generateExpectedApp("prog-", "progressive-sync/", "dev", "dev")
|
||||
expectedStageApp := generateExpectedApp("prog-", "progressive-sync/", "staging", "staging")
|
||||
expectedProdApp := generateExpectedApp("prog-", "progressive-sync/", "prod", "prod")
|
||||
|
||||
expectedStatusWave1 := map[string]v1alpha1.ApplicationSetApplicationStatus{
|
||||
"prog-dev": {
|
||||
Application: "prog-dev",
|
||||
Status: v1alpha1.ProgressiveSyncProgressing,
|
||||
},
|
||||
"prog-staging": {
|
||||
Application: "prog-staging",
|
||||
Status: v1alpha1.ProgressiveSyncWaiting,
|
||||
},
|
||||
"prog-prod": {
|
||||
Application: "prog-prod",
|
||||
Status: v1alpha1.ProgressiveSyncWaiting,
|
||||
},
|
||||
}
|
||||
|
||||
expectedStatusWave2 := map[string]v1alpha1.ApplicationSetApplicationStatus{
|
||||
"prog-dev": {
|
||||
Application: "prog-dev",
|
||||
Status: v1alpha1.ProgressiveSyncHealthy,
|
||||
},
|
||||
"prog-staging": {
|
||||
Application: "prog-staging",
|
||||
Status: v1alpha1.ProgressiveSyncProgressing,
|
||||
},
|
||||
"prog-prod": {
|
||||
Application: "prog-prod",
|
||||
Status: v1alpha1.ProgressiveSyncWaiting,
|
||||
},
|
||||
}
|
||||
|
||||
expectedStatusWave3 := map[string]v1alpha1.ApplicationSetApplicationStatus{
|
||||
"prog-dev": {
|
||||
Application: "prog-dev",
|
||||
Status: v1alpha1.ProgressiveSyncHealthy,
|
||||
},
|
||||
"prog-staging": {
|
||||
Application: "prog-staging",
|
||||
Status: v1alpha1.ProgressiveSyncHealthy,
|
||||
},
|
||||
"prog-prod": {
|
||||
Application: "prog-prod",
|
||||
Status: v1alpha1.ProgressiveSyncProgressing,
|
||||
},
|
||||
}
|
||||
|
||||
expectedAllHealthy := map[string]v1alpha1.ApplicationSetApplicationStatus{
|
||||
"prog-dev": {
|
||||
Application: "prog-dev",
|
||||
Status: v1alpha1.ProgressiveSyncHealthy,
|
||||
},
|
||||
"prog-staging": {
|
||||
Application: "prog-staging",
|
||||
Status: v1alpha1.ProgressiveSyncHealthy,
|
||||
},
|
||||
"prog-prod": {
|
||||
Application: "prog-prod",
|
||||
Status: v1alpha1.ProgressiveSyncHealthy,
|
||||
},
|
||||
}
|
||||
|
||||
Given(t).
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "progressive-sync-gating",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "prog-{{.environment}}",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "{{.environment}}",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
|
||||
Path: "progressive-sync/{{.environment}}",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "prog-{{.environment}}",
|
||||
},
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"environment": "dev"}`)},
|
||||
{Raw: []byte(`{"environment": "staging"}`)},
|
||||
{Raw: []byte(`{"environment": "prod"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: generateStandardRolloutSyncSteps(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}).
|
||||
Then().
|
||||
Expect(ApplicationsExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp})).
|
||||
And(func() {
|
||||
t.Log("ApplicationSet created")
|
||||
t.Log("Checking Dev app should be stuck in Progressing (invalid image)")
|
||||
t.Log("Verifying staging and prod are Waiting")
|
||||
}).
|
||||
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedStatusWave1), TransitionTimeout).
|
||||
And(func() {
|
||||
// Patch deployment to use valid image
|
||||
fixture.Patch(t, "progressive-sync/dev/deployment.yaml", `[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "quay.io/argoprojlabs/argocd-e2e-container:0.1"}]`)
|
||||
// Refresh the app to detect git changes
|
||||
_, err := fixture.RunCli("app", "get", "prog-dev", "--refresh")
|
||||
require.NoError(t, err)
|
||||
t.Log("After patching image and refreshing, Dev app should progress to Healthy")
|
||||
t.Log("Staging app should now be in Progressing, and prod is waiting")
|
||||
}).
|
||||
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedStatusWave2), TransitionTimeout).
|
||||
And(func() {
|
||||
// Patch deployment to use valid image
|
||||
fixture.Patch(t, "progressive-sync/staging/deployment.yaml", `[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "quay.io/argoprojlabs/argocd-e2e-container:0.1"}]`)
|
||||
// Refresh the app to detect git changes
|
||||
_, err := fixture.RunCli("app", "get", "prog-staging", "--refresh")
|
||||
require.NoError(t, err)
|
||||
t.Log("Dev and staging are now Healthy")
|
||||
t.Log("check Prod app is progressing")
|
||||
}).
|
||||
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedStatusWave3), TransitionTimeout).
|
||||
And(func() {
|
||||
// Patch deployment to use valid image
|
||||
fixture.Patch(t, "progressive-sync/prod/deployment.yaml", `[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "quay.io/argoprojlabs/argocd-e2e-container:0.1"}]`)
|
||||
// Refresh the app to detect git changes
|
||||
_, err := fixture.RunCli("app", "get", "prog-prod", "--refresh")
|
||||
require.NoError(t, err)
|
||||
}).
|
||||
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedAllHealthy), TransitionTimeout).
|
||||
And(func() {
|
||||
t.Log("progressive sync verified")
|
||||
t.Log("Dev progressed first")
|
||||
t.Log("Staging waited until Dev was Healthy")
|
||||
t.Log("Prod waited until Staging was Healthy")
|
||||
}).
|
||||
// Cleanup
|
||||
When().
|
||||
Delete().
|
||||
Then().
|
||||
ExpectWithDuration(ApplicationsDoNotExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp}), TransitionTimeout)
|
||||
}
|
||||
|
||||
func TestNoApplicationStatusWhenNoSteps(t *testing.T) {
|
||||
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
|
||||
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
|
||||
}
|
||||
|
||||
expectedConditions := []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: v1alpha1.ApplicationSetReasonParametersGenerated,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
Message: "ApplicationSet Rollout has completed",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
|
||||
},
|
||||
}
|
||||
|
||||
expectedApps := []v1alpha1.Application{
|
||||
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev"),
|
||||
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging"),
|
||||
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod"),
|
||||
}
|
||||
Given(t).
|
||||
When().
|
||||
Create(appSetInvalidStepConfiguration).
|
||||
Then().
|
||||
Expect(ApplicationSetHasConditions(expectedConditions)). // TODO: when no steps created, condition should reflect that.
|
||||
Expect(ApplicationSetDoesNotHaveApplicationStatus()).
|
||||
// Cleanup
|
||||
When().
|
||||
Delete().
|
||||
Then().
|
||||
ExpectWithDuration(ApplicationsDoNotExist(expectedApps), TransitionTimeout)
|
||||
}
|
||||
|
||||
func TestNoApplicationStatusWhenNoApplications(t *testing.T) {
|
||||
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
|
||||
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
|
||||
}
|
||||
expectedApps := []v1alpha1.Application{
|
||||
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev"),
|
||||
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging"),
|
||||
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod"),
|
||||
}
|
||||
Given(t).
|
||||
When().
|
||||
Create(appSetWithEmptyGenerator).
|
||||
Then().
|
||||
Expect(ApplicationsDoNotExist(expectedApps)).
|
||||
Expect(ApplicationSetDoesNotHaveApplicationStatus()).
|
||||
// Cleanup
|
||||
When().
|
||||
Delete().
|
||||
Then().
|
||||
Expect(ApplicationsDoNotExist(expectedApps))
|
||||
}
|
||||
|
||||
func TestProgressiveSyncMultipleAppsPerStep(t *testing.T) {
|
||||
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
|
||||
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
|
||||
}
|
||||
expectedApps := []v1alpha1.Application{
|
||||
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "sketch", "dev"),
|
||||
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "build", "dev"),
|
||||
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "verify", "staging"),
|
||||
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "validate", "staging"),
|
||||
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "ship", "prod"),
|
||||
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "run", "prod"),
|
||||
}
|
||||
Given(t).
|
||||
When().
|
||||
Create(appSetWithMultipleAppsInEachStep).
|
||||
Then().
|
||||
Expect(ApplicationsExist(expectedApps)).
|
||||
Expect(CheckApplicationInRightSteps("1", []string{"prog-sketch", "prog-build"})).
|
||||
Expect(CheckApplicationInRightSteps("2", []string{"prog-verify", "prog-validate"})).
|
||||
Expect(CheckApplicationInRightSteps("3", []string{"prog-ship", "prog-run"})).
|
||||
ExpectWithDuration(ApplicationSetHasApplicationStatus(6), TransitionTimeout).
|
||||
// Cleanup
|
||||
When().
|
||||
Delete().
|
||||
Then().
|
||||
Expect(ApplicationsDoNotExist(expectedApps))
|
||||
}
|
||||
|
||||
var appSetInvalidStepConfiguration = v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "invalid-step-configuration",
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ApplicationSet",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "prog-{{.environment}}",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "{{.environment}}",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
|
||||
Path: "progressive-sync/{{.environment}}",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "prog-{{.environment}}",
|
||||
},
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"environment": "dev"}`)},
|
||||
{Raw: []byte(`{"environment": "staging"}`)},
|
||||
{Raw: []byte(`{"environment": "prod"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
// Empty Steps with Rolling Sync shouldn't trigger
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var appSetWithEmptyGenerator = v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "appset-empty-generator",
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ApplicationSet",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "prog-{{.environment}}",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "{{.environment}}",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
|
||||
Path: "progressive-sync/{{.environment}}",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "prog-{{.environment}}",
|
||||
},
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
// Empty Generator
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: generateStandardRolloutSyncSteps(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var appSetWithMultipleAppsInEachStep = v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "progressive-sync-multi-apps",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "prog-{{.name}}",
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": "{{.environment}}",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
|
||||
Path: "progressive-sync/multiple-apps-in-step/{{.environment}}/{{.name}}",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "prog-{{.name}}",
|
||||
},
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"environment": "dev", "name": "sketch"}`)},
|
||||
{Raw: []byte(`{"environment": "dev", "name": "build"}`)},
|
||||
{Raw: []byte(`{"environment": "staging", "name": "verify"}`)},
|
||||
{Raw: []byte(`{"environment": "staging", "name": "validate"}`)},
|
||||
{Raw: []byte(`{"environment": "prod", "name": "ship"}`)},
|
||||
{Raw: []byte(`{"environment": "prod", "name": "run"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: generateStandardRolloutSyncSteps(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func generateExpectedApp(prefix string, path string, name string, envVar string) v1alpha1.Application {
|
||||
return v1alpha1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: prefix + name,
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Labels: map[string]string{
|
||||
"environment": envVar,
|
||||
},
|
||||
Finalizers: []string{
|
||||
"resources-finalizer.argocd.argoproj.io",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
|
||||
Path: path + name,
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: prefix + name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func generateStandardRolloutSyncSteps() []v1alpha1.ApplicationSetRolloutStep {
|
||||
return []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{"dev"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{"staging"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{"prod"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -15,13 +15,13 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
. "github.com/argoproj/argo-cd/v3/test/e2e/fixture/applicationsets"
|
||||
"github.com/argoproj/argo-cd/v3/test/e2e/fixture/applicationsets/utils"
|
||||
|
||||
@@ -82,39 +82,35 @@ func TestSimpleListGeneratorExternalNamespace(t *testing.T) {
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
|
||||
CreateNamespace(externalNamespace).Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator-external",
|
||||
Namespace: externalNamespace,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{.url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{.url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
|
||||
|
||||
// Update the ApplicationSet template namespace, and verify it updates the Applications
|
||||
When().
|
||||
@@ -143,7 +139,7 @@ func TestSimpleListGeneratorExternalNamespace(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-list-generator-external", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -209,74 +205,72 @@ func TestSimpleListGeneratorExternalNamespaceNoConflict(t *testing.T) {
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
|
||||
CreateNamespace(externalNamespace2).Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator-external",
|
||||
Namespace: externalNamespace2,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: externalNamespace2,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{.url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{.url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedAppExternalNamespace2})).
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedAppExternalNamespace2})).
|
||||
When().
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
|
||||
CreateNamespace(externalNamespace).Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator-external",
|
||||
Namespace: externalNamespace,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: externalNamespace,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{.url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{.url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
|
||||
When().
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
|
||||
Then().
|
||||
@@ -317,7 +311,7 @@ func TestSimpleListGeneratorExternalNamespaceNoConflict(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-list-generator-external", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
When().
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
|
||||
Then().
|
||||
@@ -365,37 +359,35 @@ func TestSimpleListGenerator(t *testing.T) {
|
||||
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster}}-guestbook"},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
TargetRevision: "HEAD",
|
||||
Path: "guestbook",
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "{{url}}",
|
||||
Namespace: "guestbook",
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
|
||||
|
||||
// Update the ApplicationSet template namespace, and verify it updates the Applications
|
||||
When().
|
||||
@@ -420,7 +412,7 @@ func TestSimpleListGenerator(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-list-generator", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -457,9 +449,6 @@ func TestSimpleListGeneratorGoTemplate(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -512,7 +501,7 @@ func TestSimpleListGeneratorGoTemplate(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("simple-list-generator", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -553,9 +542,6 @@ func TestRenderHelmValuesObject(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-values-object",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -651,9 +637,6 @@ func TestTemplatePatch(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "patch-template",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -714,7 +697,7 @@ func TestTemplatePatch(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("patch-template", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it deletes the Applications
|
||||
When().
|
||||
@@ -755,9 +738,6 @@ func TestUpdateHelmValuesObject(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-values-object-patch",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -791,7 +771,7 @@ func TestUpdateHelmValuesObject(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}).Then().
|
||||
Expect(ApplicationSetHasConditions("test-values-object-patch", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
When().
|
||||
// Update the app spec with some knew ValuesObject to force a merge
|
||||
Update(func(as *v1alpha1.ApplicationSet) {
|
||||
@@ -836,9 +816,6 @@ func TestSyncPolicyCreateUpdate(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sync-policy-create-update",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -911,7 +888,7 @@ func TestSyncPolicyCreateUpdate(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("sync-policy-create-update", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it not deletes the Applications
|
||||
// As policy is create-update, AppSet controller will remove all generated applications's ownerReferences on delete AppSet
|
||||
@@ -949,9 +926,6 @@ func TestSyncPolicyCreateDelete(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sync-policy-create-delete",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1012,7 +986,7 @@ func TestSyncPolicyCreateDelete(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("sync-policy-create-delete", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet
|
||||
When().
|
||||
@@ -1048,9 +1022,6 @@ func TestSyncPolicyCreateOnly(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sync-policy-create-only",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1114,7 +1085,7 @@ func TestSyncPolicyCreateOnly(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewNamespace})).
|
||||
|
||||
// verify the ApplicationSet status conditions were set correctly
|
||||
Expect(ApplicationSetHasConditions("sync-policy-create-only", ExpectedConditions)).
|
||||
Expect(ApplicationSetHasConditions(ExpectedConditions)).
|
||||
|
||||
// Delete the ApplicationSet, and verify it not deletes the Applications
|
||||
// As policy is create-update, AppSet controller will remove all generated applications's ownerReferences on delete AppSet
|
||||
@@ -1365,9 +1336,6 @@ func TestSimpleSCMProviderGenerator(t *testing.T) {
|
||||
Given(t).
|
||||
// Create an SCMProviderGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-scm-provider-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{ repository }}-guestbook"},
|
||||
@@ -1440,9 +1408,6 @@ func TestSimpleSCMProviderGeneratorGoTemplate(t *testing.T) {
|
||||
Given(t).
|
||||
// Create an SCMProviderGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-scm-provider-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1507,12 +1472,9 @@ func TestSCMProviderGeneratorSCMProviderNotAllowed(t *testing.T) {
|
||||
// Because you can't &"".
|
||||
repoMatch := "argo-cd"
|
||||
|
||||
Given(t).
|
||||
// Create an SCMProviderGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "scm-provider-generator-scm-provider-not-allowed",
|
||||
},
|
||||
ctx := Given(t)
|
||||
// Create an SCMProviderGenerator-based ApplicationSet
|
||||
ctx.When().Create(v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1549,7 +1511,7 @@ func TestSCMProviderGeneratorSCMProviderNotAllowed(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp})).
|
||||
And(func() {
|
||||
// app should be listed
|
||||
output, err := fixture.RunCli("appset", "get", "scm-provider-generator-scm-provider-not-allowed")
|
||||
output, err := fixture.RunCli("appset", "get", ctx.GetName())
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "scm provider not allowed")
|
||||
})
|
||||
@@ -1583,9 +1545,6 @@ func TestCustomApplicationFinalizers(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
@@ -1650,9 +1609,6 @@ func TestCustomApplicationFinalizersGoTemplate(t *testing.T) {
|
||||
Given(t).
|
||||
// Create a ListGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-list-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -1783,9 +1739,6 @@ func TestSimpleSCMProviderGeneratorTokenRefStrictOk(t *testing.T) {
|
||||
}).
|
||||
// Create an SCMProviderGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-scm-provider-generator-strict",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{ repository }}-guestbook"},
|
||||
@@ -1869,29 +1822,26 @@ func TestSimpleSCMProviderGeneratorTokenRefStrictKo(t *testing.T) {
|
||||
// Because you can't &"".
|
||||
repoMatch := "argo-cd"
|
||||
|
||||
Given(t).
|
||||
And(func() {
|
||||
_, err := utils.GetE2EFixtureK8sClient(t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Create(t.Context(), &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Name: secretName,
|
||||
Labels: map[string]string{
|
||||
// Try to exfiltrate cluster secret
|
||||
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
|
||||
},
|
||||
ctx := Given(t)
|
||||
ctx.And(func() {
|
||||
_, err := utils.GetE2EFixtureK8sClient(t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Create(t.Context(), &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: fixture.TestNamespace(),
|
||||
Name: secretName,
|
||||
Labels: map[string]string{
|
||||
// Try to exfiltrate cluster secret
|
||||
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"hello": []byte("world"),
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"hello": []byte("world"),
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
|
||||
assert.NoError(t, err)
|
||||
}).
|
||||
assert.NoError(t, err)
|
||||
}).
|
||||
// Create an SCMProviderGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-scm-provider-generator-strict-ko",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{ repository }}-guestbook"},
|
||||
@@ -1932,7 +1882,7 @@ func TestSimpleSCMProviderGeneratorTokenRefStrictKo(t *testing.T) {
|
||||
When().
|
||||
And(func() {
|
||||
// app should be listed
|
||||
output, err := fixture.RunCli("appset", "get", "simple-scm-provider-generator-strict-ko")
|
||||
output, err := fixture.RunCli("appset", "get", ctx.GetName())
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, fmt.Sprintf("scm provider: error fetching Github token: secret %s/%s is not a valid SCM creds secret", fixture.TestNamespace(), secretName))
|
||||
err2 := utils.GetE2EFixtureK8sClient(t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Delete(t.Context(), secretName, metav1.DeleteOptions{})
|
||||
@@ -1978,9 +1928,6 @@ func TestSimplePullRequestGenerator(t *testing.T) {
|
||||
Given(t).
|
||||
// Create an PullRequestGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-pull-request-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "guestbook-{{ number }}"},
|
||||
@@ -2057,9 +2004,6 @@ func TestSimplePullRequestGeneratorGoTemplate(t *testing.T) {
|
||||
Given(t).
|
||||
// Create an PullRequestGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-pull-request-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -2132,12 +2076,9 @@ func TestPullRequestGeneratorNotAllowedSCMProvider(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
Given(t).
|
||||
// Create an PullRequestGenerator-based ApplicationSet
|
||||
When().Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pull-request-generator-not-allowed-scm",
|
||||
},
|
||||
ctx := Given(t)
|
||||
// Create an PullRequestGenerator-based ApplicationSet
|
||||
ctx.When().Create(v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
@@ -2179,7 +2120,7 @@ func TestPullRequestGeneratorNotAllowedSCMProvider(t *testing.T) {
|
||||
}).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp})).
|
||||
And(func() {
|
||||
// app should be listed
|
||||
output, err := fixture.RunCli("appset", "get", "pull-request-generator-not-allowed-scm")
|
||||
output, err := fixture.RunCli("appset", "get", ctx.GetName())
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, output, "scm provider not allowed")
|
||||
})
|
||||
|
||||
@@ -35,27 +35,27 @@ func createTestPlugin(t *testing.T, name, content string) string {
|
||||
|
||||
// TestCliAppCommand verifies the basic Argo CD CLI commands for app synchronization and listing.
|
||||
func TestCliAppCommand(t *testing.T) {
|
||||
Given(t).
|
||||
Path("hook").
|
||||
ctx := Given(t)
|
||||
ctx.Path("hook").
|
||||
When().
|
||||
CreateApp().
|
||||
And(func() {
|
||||
output, err := RunCli("app", "sync", Name(), "--timeout", "90")
|
||||
output, err := RunCli("app", "sync", ctx.AppName(), "--timeout", "90")
|
||||
require.NoError(t, err)
|
||||
vars := map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()}
|
||||
vars := map[string]any{"Name": ctx.AppName(), "Namespace": ctx.DeploymentNamespace()}
|
||||
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} pod Synced Progressing pod/pod created`, vars))
|
||||
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} hook Succeeded Sync pod/hook created`, vars))
|
||||
}).
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
And(func(a *Application) {
|
||||
output, err := RunCli("app", "list")
|
||||
require.NoError(t, err)
|
||||
expected := Tmpl(
|
||||
t,
|
||||
`{{.Name}} https://kubernetes.default.svc {{.Namespace}} default Synced Healthy Manual <none>`,
|
||||
map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()})
|
||||
map[string]any{"Name": a.GetName(), "Namespace": ctx.DeploymentNamespace()})
|
||||
assert.Contains(t, NormalizeOutput(output), expected)
|
||||
})
|
||||
}
|
||||
@@ -75,17 +75,18 @@ func TestNormalArgoCDCommandsExecuteOverPluginsWithSameName(t *testing.T) {
|
||||
})
|
||||
t.Setenv("PATH", filepath.Dir(pluginPath)+":"+origPath)
|
||||
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.Path("hook").
|
||||
Path("hook").
|
||||
When().
|
||||
CreateApp().
|
||||
And(func() {
|
||||
output, err := RunCli("app", "sync", Name(), "--timeout", "90")
|
||||
output, err := RunCli("app", "sync", ctx.AppName(), "--timeout", "90")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotContains(t, NormalizeOutput(output), "I am a plugin, not Argo CD!")
|
||||
|
||||
vars := map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()}
|
||||
vars := map[string]any{"Name": ctx.AppName(), "Namespace": ctx.DeploymentNamespace()}
|
||||
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} pod Synced Progressing pod/pod created`, vars))
|
||||
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} hook Succeeded Sync pod/hook created`, vars))
|
||||
}).
|
||||
@@ -101,7 +102,7 @@ func TestNormalArgoCDCommandsExecuteOverPluginsWithSameName(t *testing.T) {
|
||||
expected := Tmpl(
|
||||
t,
|
||||
`{{.Name}} https://kubernetes.default.svc {{.Namespace}} default Synced Healthy Manual <none>`,
|
||||
map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()})
|
||||
map[string]any{"Name": ctx.AppName(), "Namespace": ctx.DeploymentNamespace()})
|
||||
assert.Contains(t, NormalizeOutput(output), expected)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -48,11 +48,7 @@ func TestSimpleClusterGeneratorExternalNamespace(t *testing.T) {
|
||||
When().
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
|
||||
CreateNamespace(externalNamespace).
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -148,9 +144,6 @@ func TestSimpleClusterGenerator(t *testing.T) {
|
||||
When().
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -273,9 +266,6 @@ func TestClusterGeneratorWithLocalCluster(t *testing.T) {
|
||||
// Create a ClusterGenerator-based ApplicationSet
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "in-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -364,9 +354,6 @@ func TestSimpleClusterGeneratorAddingCluster(t *testing.T) {
|
||||
When().
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -447,9 +434,6 @@ func TestSimpleClusterGeneratorDeletingCluster(t *testing.T) {
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
CreateClusterSecret("my-secret2", "cluster2", "https://kubernetes.default.svc").
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -540,9 +524,6 @@ func TestClusterGeneratorWithFlatListMode(t *testing.T) {
|
||||
When().
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
GoTemplate: true,
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
|
||||
@@ -25,20 +25,19 @@ func TestClusterList(t *testing.T) {
|
||||
expected := fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
|
||||
https://kubernetes.default.svc in-cluster %v Successful `, fixture.GetVersions(t).ServerVersion)
|
||||
|
||||
clusterFixture.
|
||||
Given(t).
|
||||
Project(fixture.ProjectName)
|
||||
ctx := clusterFixture.Given(t)
|
||||
ctx.Project(fixture.ProjectName)
|
||||
|
||||
// We need an application targeting the cluster, otherwise the test will
|
||||
// fail if run isolated.
|
||||
app.GivenWithSameState(t).
|
||||
app.GivenWithSameState(ctx).
|
||||
Path(guestbookPath).
|
||||
When().
|
||||
CreateApp()
|
||||
|
||||
tries := 25
|
||||
for i := 0; i <= tries; i++ {
|
||||
clusterFixture.GivenWithSameState(t).
|
||||
clusterFixture.GivenWithSameState(ctx).
|
||||
When().
|
||||
List().
|
||||
Then().
|
||||
@@ -56,9 +55,8 @@ https://kubernetes.default.svc in-cluster %v Successful `, fixtu
|
||||
}
|
||||
|
||||
func TestClusterAdd(t *testing.T) {
|
||||
clusterFixture.
|
||||
Given(t).
|
||||
Project(fixture.ProjectName).
|
||||
ctx := clusterFixture.Given(t)
|
||||
ctx.Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
When().
|
||||
@@ -66,21 +64,20 @@ func TestClusterAdd(t *testing.T) {
|
||||
List().
|
||||
Then().
|
||||
AndCLIOutput(func(output string, _ error) {
|
||||
assert.Equal(t, fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
|
||||
https://kubernetes.default.svc test-cluster-add %v Successful %s`, fixture.GetVersions(t).ServerVersion, fixture.ProjectName), output)
|
||||
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
|
||||
})
|
||||
}
|
||||
|
||||
func TestClusterAddPermissionDenied(t *testing.T) {
|
||||
accountFixture.Given(t).
|
||||
Name("test").
|
||||
ctx := accountFixture.Given(t)
|
||||
ctx.Name("test").
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
SetPermissions([]fixture.ACL{}, "org-admin")
|
||||
|
||||
clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
GivenWithSameState(ctx).
|
||||
Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
@@ -94,8 +91,8 @@ func TestClusterAddPermissionDenied(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClusterAddAllowed(t *testing.T) {
|
||||
accountFixture.Given(t).
|
||||
Name("test").
|
||||
accountCtx := accountFixture.Given(t)
|
||||
accountCtx.Name("test").
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
@@ -112,8 +109,8 @@ func TestClusterAddAllowed(t *testing.T) {
|
||||
},
|
||||
}, "org-admin")
|
||||
|
||||
clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
ctx := clusterFixture.GivenWithSameState(accountCtx)
|
||||
ctx.Project(fixture.ProjectName).
|
||||
Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
@@ -122,14 +119,13 @@ func TestClusterAddAllowed(t *testing.T) {
|
||||
List().
|
||||
Then().
|
||||
AndCLIOutput(func(output string, _ error) {
|
||||
assert.Equal(t, fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
|
||||
https://kubernetes.default.svc test-cluster-add-allowed %v Successful argo-project`, fixture.GetVersions(t).ServerVersion), output)
|
||||
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
|
||||
})
|
||||
}
|
||||
|
||||
func TestClusterListDenied(t *testing.T) {
|
||||
accountFixture.Given(t).
|
||||
Name("test").
|
||||
ctx := accountFixture.Given(t)
|
||||
ctx.Name("test").
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
@@ -142,7 +138,7 @@ func TestClusterListDenied(t *testing.T) {
|
||||
}, "org-admin")
|
||||
|
||||
clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
GivenWithSameState(ctx).
|
||||
Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
@@ -156,17 +152,14 @@ func TestClusterListDenied(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClusterSet(t *testing.T) {
|
||||
fixture.EnsureCleanState(t)
|
||||
defer fixture.RecordTestRun(t)
|
||||
clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
Project(fixture.ProjectName).
|
||||
Name("in-cluster").
|
||||
ctx := clusterFixture.Given(t)
|
||||
ctx.Project(fixture.ProjectName).
|
||||
Namespaces([]string{"namespace-edit-1", "namespace-edit-2"}).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
When().
|
||||
Create().
|
||||
SetNamespaces().
|
||||
GetByName("in-cluster").
|
||||
GetByName().
|
||||
Then().
|
||||
AndCLIOutput(func(output string, _ error) {
|
||||
assert.Contains(t, output, "namespace-edit-1")
|
||||
@@ -225,8 +218,8 @@ func TestClusterURLInRestAPI(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClusterDeleteDenied(t *testing.T) {
|
||||
accountFixture.Given(t).
|
||||
Name("test").
|
||||
ctx := accountFixture.Given(t)
|
||||
ctx.Name("test").
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
@@ -245,7 +238,7 @@ func TestClusterDeleteDenied(t *testing.T) {
|
||||
|
||||
// Attempt to remove cluster creds by name
|
||||
clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
GivenWithSameState(ctx).
|
||||
Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
@@ -259,7 +252,7 @@ func TestClusterDeleteDenied(t *testing.T) {
|
||||
|
||||
// Attempt to remove cluster creds by server
|
||||
clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
GivenWithSameState(ctx).
|
||||
Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
@@ -273,8 +266,8 @@ func TestClusterDeleteDenied(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClusterDelete(t *testing.T) {
|
||||
accountFixture.Given(t).
|
||||
Name("default").
|
||||
ctx := clusterFixture.Given(t)
|
||||
accountFixture.GivenWithSameState(ctx).
|
||||
When().
|
||||
Create().
|
||||
Login().
|
||||
@@ -296,14 +289,18 @@ func TestClusterDelete(t *testing.T) {
|
||||
},
|
||||
}, "org-admin")
|
||||
|
||||
clstAction := clusterFixture.
|
||||
GivenWithSameState(t).
|
||||
Name("default").
|
||||
clstAction := ctx.
|
||||
Project(fixture.ProjectName).
|
||||
Upsert(true).
|
||||
Server(KubernetesInternalAPIServerAddr).
|
||||
When().
|
||||
CreateWithRBAC()
|
||||
clstAction.
|
||||
Then().
|
||||
Expect().
|
||||
AndCLIOutput(func(_ string, err error) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
// Check that RBAC is created
|
||||
_, err := fixture.Run("", "kubectl", "get", "serviceaccount", "argocd-manager", "-n", "kube-system")
|
||||
@@ -318,7 +315,7 @@ func TestClusterDelete(t *testing.T) {
|
||||
clstAction.DeleteByName().
|
||||
Then().
|
||||
AndCLIOutput(func(output string, _ error) {
|
||||
assert.Equal(t, "Cluster 'default' removed", output)
|
||||
assert.Equal(t, fmt.Sprintf("Cluster '%s' removed", ctx.GetName()), output)
|
||||
})
|
||||
|
||||
// Check that RBAC is removed after delete
|
||||
|
||||
@@ -61,12 +61,8 @@ func TestSimpleClusterDecisionResourceGeneratorExternalNamespace(t *testing.T) {
|
||||
CreatePlacementDecisionConfigMap("my-configmap").
|
||||
CreatePlacementDecision("my-placementdecision").
|
||||
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
|
||||
CreateNamespace(externalNamespace).
|
||||
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -170,9 +166,6 @@ func TestSimpleClusterDecisionResourceGenerator(t *testing.T) {
|
||||
CreatePlacementDecision("my-placementdecision").
|
||||
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -281,9 +274,6 @@ func TestSimpleClusterDecisionResourceGeneratorAddingCluster(t *testing.T) {
|
||||
CreatePlacementDecision("my-placementdecision").
|
||||
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -377,9 +367,6 @@ func TestSimpleClusterDecisionResourceGeneratorDeletingClusterSecret(t *testing.
|
||||
CreatePlacementDecision("my-placementdecision").
|
||||
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
@@ -481,9 +468,6 @@ func TestSimpleClusterDecisionResourceGeneratorDeletingClusterFromResource(t *te
|
||||
CreatePlacementDecision("my-placementdecision").
|
||||
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "simple-cluster-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
|
||||
|
||||
@@ -23,7 +23,8 @@ import (
|
||||
func TestCrossNamespaceOwnership(t *testing.T) {
|
||||
var clusterRoleUID string
|
||||
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
Path("cross-namespace-ownership").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -54,14 +55,14 @@ metadata:
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "list"]`, DeploymentNamespace(), clusterRoleUID)
|
||||
verbs: ["get", "list"]`, ctx.DeploymentNamespace(), clusterRoleUID)
|
||||
|
||||
_, err := Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml))
|
||||
require.NoError(t, err)
|
||||
t.Logf("Created Role in app namespace: %s", DeploymentNamespace())
|
||||
t.Logf("Created Role in app namespace: %s", ctx.DeploymentNamespace())
|
||||
|
||||
// Create another namespace for cross-namespace testing
|
||||
otherNamespace := DeploymentNamespace() + "-other"
|
||||
otherNamespace := ctx.DeploymentNamespace() + "-other"
|
||||
_, err = Run("", "kubectl", "create", "namespace", otherNamespace)
|
||||
if err != nil {
|
||||
// Namespace might already exist, that's ok
|
||||
@@ -185,7 +186,8 @@ rules:
|
||||
func TestCrossNamespaceOwnershipWithRefresh(t *testing.T) {
|
||||
var clusterRoleUID string
|
||||
|
||||
Given(t).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
Path("cross-namespace-ownership").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -215,7 +217,7 @@ metadata:
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "list"]`, DeploymentNamespace(), clusterRoleUID)
|
||||
verbs: ["get", "list"]`, ctx.DeploymentNamespace(), clusterRoleUID)
|
||||
|
||||
_, err := Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml))
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
. "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
@@ -23,11 +21,7 @@ import (
|
||||
func TestCustomToolWithGitCreds(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-gitcreds")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-gitcreds").
|
||||
CustomCACertAdded().
|
||||
// add the private repo with credentials
|
||||
HTTPSRepoURLAdded(true).
|
||||
@@ -41,7 +35,7 @@ func TestCustomToolWithGitCreds(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "argocd", output)
|
||||
})
|
||||
@@ -51,11 +45,7 @@ func TestCustomToolWithGitCreds(t *testing.T) {
|
||||
func TestCustomToolWithGitCredsTemplate(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-gitcredstemplate")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-gitcredstemplate").
|
||||
CustomCACertAdded().
|
||||
// add the git creds template
|
||||
HTTPSCredentialsUserPassAdded().
|
||||
@@ -71,17 +61,17 @@ func TestCustomToolWithGitCredsTemplate(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "argocd", output)
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitUsername}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitUsername}")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, output)
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitPassword}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitPassword}")
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, output)
|
||||
})
|
||||
@@ -92,11 +82,7 @@ func TestCustomToolWithSSHGitCreds(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
// path does not matter, we ignore it
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-gitsshcreds")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-gitsshcreds").
|
||||
// add the private repo with ssh credentials
|
||||
CustomSSHKnownHostsAdded().
|
||||
SSHRepoURLAdded(true).
|
||||
@@ -111,12 +97,12 @@ func TestCustomToolWithSSHGitCreds(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", fixture.Name(), "-o", "jsonpath={.metadata.annotations.GitSSHCommand}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.GetName(), "-o", "jsonpath={.metadata.annotations.GitSSHCommand}")
|
||||
require.NoError(t, err)
|
||||
assert.Regexp(t, `-i [^ ]+`, output, "test plugin expects $GIT_SSH_COMMAND to contain the option '-i <path to ssh private key>'")
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", fixture.Name(), "-o", "jsonpath={.metadata.annotations.GitSSHCredsFileSHA}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.GetName(), "-o", "jsonpath={.metadata.annotations.GitSSHCredsFileSHA}")
|
||||
require.NoError(t, err)
|
||||
assert.Regexp(t, `\w+\s+[\/\w]+`, output, "git ssh credentials file should be able to be read, hashing the contents")
|
||||
})
|
||||
@@ -126,11 +112,7 @@ func TestCustomToolWithSSHGitCredsDisabled(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
// path does not matter, we ignore it
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-gitsshcreds-disable-provide")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-gitsshcreds-disable-provide").
|
||||
CustomCACertAdded().
|
||||
// add the private repo with ssh credentials
|
||||
CustomSSHKnownHostsAdded().
|
||||
@@ -150,11 +132,7 @@ func TestCustomToolWithSSHGitCredsDisabled(t *testing.T) {
|
||||
func TestCustomToolWithEnv(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-fileName")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-fileName").
|
||||
// does not matter what the path is
|
||||
Path("cmp-fileName").
|
||||
When().
|
||||
@@ -175,18 +153,18 @@ func TestCustomToolWithEnv(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "baz", output)
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Foo}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Foo}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "bar", output)
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedKubeVersion, output)
|
||||
}).
|
||||
@@ -195,7 +173,7 @@ func TestCustomToolWithEnv(t *testing.T) {
|
||||
expectedAPIVersionSlice := strings.Split(expectedAPIVersion, ",")
|
||||
sort.Strings(expectedAPIVersionSlice)
|
||||
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
|
||||
require.NoError(t, err)
|
||||
outputSlice := strings.Split(output, ",")
|
||||
sort.Strings(outputSlice)
|
||||
@@ -211,11 +189,7 @@ func TestCustomToolSyncAndDiffLocal(t *testing.T) {
|
||||
ctx := Given(t)
|
||||
appPath := filepath.Join(testdataPath, "guestbook")
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-kustomize")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-kustomize").
|
||||
// does not matter what the path is
|
||||
Path("guestbook").
|
||||
When().
|
||||
@@ -233,29 +207,11 @@ func TestCustomToolSyncAndDiffLocal(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func startCMPServer(t *testing.T, configFile string) {
|
||||
t.Helper()
|
||||
pluginSockFilePath := fixture.TmpDir + fixture.PluginSockFilePath
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd-cmp-server")
|
||||
// ARGOCD_PLUGINSOCKFILEPATH should be set as the same value as repo server env var
|
||||
t.Setenv("ARGOCD_PLUGINSOCKFILEPATH", pluginSockFilePath)
|
||||
if _, err := os.Stat(pluginSockFilePath); os.IsNotExist(err) {
|
||||
// path/to/whatever does not exist
|
||||
err := os.Mkdir(pluginSockFilePath, 0o700)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
errors.NewHandler(t).FailOnErr(fixture.RunWithStdin("", "", "../../dist/argocd", "--config-dir-path", configFile))
|
||||
}
|
||||
|
||||
// Discover by fileName
|
||||
func TestCMPDiscoverWithFileName(t *testing.T) {
|
||||
pluginName := "cmp-fileName"
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-fileName")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-fileName").
|
||||
Path(pluginName + "/subdir").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -269,11 +225,7 @@ func TestCMPDiscoverWithFileName(t *testing.T) {
|
||||
// Discover by Find glob
|
||||
func TestCMPDiscoverWithFindGlob(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-glob")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-find-glob").
|
||||
Path("guestbook").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -287,11 +239,7 @@ func TestCMPDiscoverWithFindGlob(t *testing.T) {
|
||||
// Discover by Plugin Name
|
||||
func TestCMPDiscoverWithPluginName(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-glob")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-find-glob").
|
||||
Path("guestbook").
|
||||
When().
|
||||
CreateFromFile(func(app *Application) {
|
||||
@@ -310,11 +258,7 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
pluginName := "cmp-find-command"
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-command")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-find-command").
|
||||
Path(pluginName).
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -324,13 +268,13 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "baz", output)
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedKubeVersion, output)
|
||||
}).
|
||||
@@ -339,7 +283,7 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
expectedAPIVersionSlice := strings.Split(expectedAPIVersion, ",")
|
||||
sort.Strings(expectedAPIVersionSlice)
|
||||
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
|
||||
require.NoError(t, err)
|
||||
outputSlice := strings.Split(output, ",")
|
||||
sort.Strings(outputSlice)
|
||||
@@ -349,12 +293,9 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPruneResourceFromCMP(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-glob")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
RunningCMPServer("./testdata/cmp-find-glob").
|
||||
Path("guestbook").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -366,18 +307,14 @@ func TestPruneResourceFromCMP(t *testing.T) {
|
||||
Then().
|
||||
Expect(DoesNotExist()).
|
||||
AndAction(func() {
|
||||
_, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "deployment", "guestbook-ui")
|
||||
_, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "deployment", "guestbook-ui")
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPreserveFileModeForCMP(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-preserve-file-mode")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-preserve-file-mode").
|
||||
Path("cmp-preserve-file-mode").
|
||||
When().
|
||||
CreateFromFile(func(app *Application) {
|
||||
@@ -393,11 +330,7 @@ func TestPreserveFileModeForCMP(t *testing.T) {
|
||||
|
||||
func TestCMPWithSymlinkPartialFiles(t *testing.T) {
|
||||
Given(t, fixture.WithTestData("testdata2")).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata2/cmp-symlink")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata2/cmp-symlink").
|
||||
Path("guestbook-partial-symlink-files").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -410,11 +343,7 @@ func TestCMPWithSymlinkPartialFiles(t *testing.T) {
|
||||
|
||||
func TestCMPWithSymlinkFiles(t *testing.T) {
|
||||
Given(t, fixture.WithTestData("testdata2")).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata2/cmp-symlink")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata2/cmp-symlink").
|
||||
Path("guestbook-symlink-files").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -427,11 +356,7 @@ func TestCMPWithSymlinkFiles(t *testing.T) {
|
||||
|
||||
func TestCMPWithSymlinkFolder(t *testing.T) {
|
||||
Given(t, fixture.WithTestData("testdata2")).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata2/cmp-symlink")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata2/cmp-symlink").
|
||||
Path("guestbook-symlink-folder").
|
||||
When().
|
||||
CreateApp().
|
||||
|
||||
@@ -70,7 +70,7 @@ func TestDeploymentWithAnnotationTrackingMode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, out, fmt.Sprintf(`annotations:
|
||||
argocd.argoproj.io/tracking-id: %s:apps/Deployment:%s/nginx-deployment
|
||||
`, ctx.AppName(), DeploymentNamespace()))
|
||||
`, ctx.AppName(), ctx.DeploymentNamespace()))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ func TestDeploymentWithoutTrackingMode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, out, fmt.Sprintf(`annotations:
|
||||
argocd.argoproj.io/tracking-id: %s:apps/Deployment:%s/nginx-deployment
|
||||
`, ctx.AppName(), DeploymentNamespace()))
|
||||
`, ctx.AppName(), ctx.DeploymentNamespace()))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -128,19 +128,20 @@ func TestDeployToKubernetesAPIURLWithQueryParameter(t *testing.T) {
|
||||
// We test with both a cluster-scoped, and a non-cluster scoped, Argo CD Cluster Secret.
|
||||
clusterScopedParam := []bool{false, true}
|
||||
for _, clusterScoped := range clusterScopedParam {
|
||||
EnsureCleanState(t)
|
||||
ctx := Given(t)
|
||||
|
||||
// Simulate two users, each with their own Argo CD cluster secret that can only deploy to their Namespace
|
||||
users := []string{E2ETestPrefix + "user1", E2ETestPrefix + "user2"}
|
||||
users := []string{"user1", "user2"}
|
||||
|
||||
for _, username := range users {
|
||||
createNamespaceScopedUser(t, username, clusterScoped)
|
||||
ns, _, destName := createNamespaceScopedUser(ctx, username, clusterScoped)
|
||||
|
||||
GivenWithSameState(t).
|
||||
GivenWithSameState(ctx).
|
||||
Name("e2e-test-app-"+username).
|
||||
DestName(destName).
|
||||
Path("deployment").
|
||||
When().
|
||||
CreateWithNoNameSpace("--dest-namespace", username).
|
||||
CreateWithNoNameSpace("--dest-namespace", ns).
|
||||
Sync().
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
@@ -159,13 +160,23 @@ func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *
|
||||
clusterScopedParam := []bool{ /*false,*/ true}
|
||||
|
||||
for _, clusterScoped := range clusterScopedParam {
|
||||
EnsureCleanState(t)
|
||||
ctx := Given(t)
|
||||
|
||||
// Simulate two users, each with their own Argo CD cluster secret that can only deploy to their Namespace
|
||||
users := []string{E2ETestPrefix + "user1", E2ETestPrefix + "user2"}
|
||||
users := []string{"user1", "user2"}
|
||||
nsInfo := make(map[string]struct {
|
||||
namespace string
|
||||
serviceAccount string
|
||||
destName string
|
||||
})
|
||||
|
||||
for _, username := range users {
|
||||
createNamespaceScopedUser(t, username, clusterScoped)
|
||||
ns, sa, destName := createNamespaceScopedUser(ctx, username, clusterScoped)
|
||||
nsInfo[username] = struct {
|
||||
namespace string
|
||||
serviceAccount string
|
||||
destName string
|
||||
}{namespace: ns, serviceAccount: sa, destName: destName}
|
||||
}
|
||||
|
||||
for idx, username := range users {
|
||||
@@ -174,21 +185,21 @@ func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *
|
||||
otherUser := users[(idx+1)%len(users)]
|
||||
|
||||
// e.g. Attempt to deploy to user1's namespace, with user2's cluster Secret. This should fail, as user2's cluster Secret does not have the requisite permissions.
|
||||
consequences := GivenWithSameState(t).
|
||||
consequences := GivenWithSameState(ctx).
|
||||
Name("e2e-test-app-"+username).
|
||||
DestName(E2ETestPrefix+"cluster-"+otherUser).
|
||||
DestName(nsInfo[otherUser].destName).
|
||||
Path("deployment").
|
||||
When().
|
||||
CreateWithNoNameSpace("--dest-namespace", username).IgnoreErrors().
|
||||
CreateWithNoNameSpace("--dest-namespace", nsInfo[username].namespace).IgnoreErrors().
|
||||
Sync().Then()
|
||||
|
||||
// The error message differs based on whether the Argo CD Cluster Secret is namespace-scoped or cluster-scoped, but the idea is the same:
|
||||
// - Even when deploying to the same cluster using 2 separate ServiceAccounts, the RBAC of those ServiceAccounts should continue to fully enforce RBAC boundaries.
|
||||
|
||||
if !clusterScoped {
|
||||
consequences.Expect(Condition(ApplicationConditionComparisonError, "Namespace \""+username+"\" for Deployment \"nginx-deployment\" is not managed"))
|
||||
consequences.Expect(Condition(ApplicationConditionComparisonError, "Namespace \""+nsInfo[username].namespace+"\" for Deployment \"nginx-deployment\" is not managed"))
|
||||
} else {
|
||||
consequences.Expect(OperationMessageContains("User \"system:serviceaccount:" + otherUser + ":" + otherUser + "-serviceaccount\" cannot create resource \"deployments\" in API group \"apps\" in the namespace \"" + username + "\""))
|
||||
consequences.Expect(OperationMessageContains("User \"system:serviceaccount:" + nsInfo[otherUser].namespace + ":" + nsInfo[otherUser].serviceAccount + "\" cannot create resource \"deployments\" in API group \"apps\" in the namespace \"" + nsInfo[username].namespace + "\""))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -196,10 +207,13 @@ func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *
|
||||
|
||||
// generateReadOnlyClusterRoleandBindingForServiceAccount creates a ClusterRole/Binding that allows a ServiceAccount in a given namespace to read all resources on a cluster.
|
||||
// - This allows the ServiceAccount to be used within a cluster-scoped Argo CD Cluster Secret
|
||||
func generateReadOnlyClusterRoleandBindingForServiceAccount(roleSuffix string, serviceAccountNS string) (rbacv1.ClusterRole, rbacv1.ClusterRoleBinding) {
|
||||
func generateReadOnlyClusterRoleandBindingForServiceAccount(c *Context, username, serviceAccountName, namespace string) (rbacv1.ClusterRole, rbacv1.ClusterRoleBinding) {
|
||||
clusterRole := rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "read-all-" + roleSuffix,
|
||||
Name: DnsFriendly("read-all-"+username, "-"+c.ShortID()),
|
||||
Labels: map[string]string{
|
||||
TestingLabel: "true",
|
||||
},
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
Verbs: []string{"get", "list", "watch"},
|
||||
@@ -210,12 +224,15 @@ func generateReadOnlyClusterRoleandBindingForServiceAccount(roleSuffix string, s
|
||||
|
||||
clusterRoleBinding := rbacv1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "read-all-" + roleSuffix,
|
||||
Name: DnsFriendly("read-all-"+username, "-"+c.ShortID()),
|
||||
Labels: map[string]string{
|
||||
TestingLabel: "true",
|
||||
},
|
||||
},
|
||||
Subjects: []rbacv1.Subject{{
|
||||
Kind: rbacv1.ServiceAccountKind,
|
||||
Namespace: serviceAccountNS,
|
||||
Name: roleSuffix + "-serviceaccount",
|
||||
Namespace: namespace,
|
||||
Name: serviceAccountName,
|
||||
}},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
@@ -258,26 +275,29 @@ func buildArgoCDClusterSecret(secretName, secretNamespace, clusterName, clusterS
|
||||
// createNamespaceScopedUser
|
||||
// - username = name of Namespace the simulated user is able to deploy to
|
||||
// - clusterScopedSecrets = whether the Service Account is namespace-scoped or cluster-scoped.
|
||||
func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecrets bool) {
|
||||
t.Helper()
|
||||
func createNamespaceScopedUser(c *Context, username string, clusterScopedSecrets bool) (string, string, string) {
|
||||
c.T().Helper()
|
||||
// Create a new Namespace for our simulated user
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: username,
|
||||
Name: DnsFriendly(username, "-"+c.ShortID()),
|
||||
Labels: map[string]string{
|
||||
TestingLabel: "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := KubeClientset.CoreV1().Namespaces().Create(t.Context(), &ns, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err := KubeClientset.CoreV1().Namespaces().Create(c.T().Context(), &ns, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
// Create a ServiceAccount in that Namespace, which will be used for the Argo CD Cluster SEcret
|
||||
serviceAccountName := username + "-serviceaccount"
|
||||
serviceAccountName := DnsFriendly(username, "-sa-"+c.ShortID())
|
||||
err = clusterauth.CreateServiceAccount(KubeClientset, serviceAccountName, ns.Name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
// Create a Role that allows the ServiceAccount to read/write all within the Namespace
|
||||
role := rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "allow-all",
|
||||
Name: DnsFriendly("allow-all", "-"+c.ShortID()),
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
@@ -286,13 +306,13 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
APIGroups: []string{"*"},
|
||||
}},
|
||||
}
|
||||
_, err = KubeClientset.RbacV1().Roles(role.Namespace).Create(t.Context(), &role, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.RbacV1().Roles(role.Namespace).Create(c.T().Context(), &role, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
// Bind the Role with the ServiceAccount in the Namespace
|
||||
roleBinding := rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "allow-all-binding",
|
||||
Name: DnsFriendly("allow-all-binding", "-"+c.ShortID()),
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{{
|
||||
@@ -306,32 +326,32 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
Name: role.Name,
|
||||
},
|
||||
}
|
||||
_, err = KubeClientset.RbacV1().RoleBindings(roleBinding.Namespace).Create(t.Context(), &roleBinding, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.RbacV1().RoleBindings(roleBinding.Namespace).Create(c.T().Context(), &roleBinding, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
var token string
|
||||
|
||||
// Attempting to patch the ServiceAccount can intermittently fail with 'failed to patch serviceaccount "(...)" with bearer token secret: Operation cannot be fulfilled on serviceaccounts "(...)": the object has been modified; please apply your changes to the latest version and try again'
|
||||
// We thus keep trying for up to 20 seconds.
|
||||
waitErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 20*time.Second, true, func(context.Context) (done bool, err error) {
|
||||
waitErr := wait.PollUntilContextTimeout(c.T().Context(), 1*time.Second, 20*time.Second, true, func(context.Context) (done bool, err error) {
|
||||
// Retrieve the bearer token from the ServiceAccount
|
||||
token, err = clusterauth.GetServiceAccountBearerToken(KubeClientset, ns.Name, serviceAccountName, time.Second*60)
|
||||
|
||||
// Success is no error and a real token, otherwise keep trying
|
||||
return (err == nil && token != ""), nil
|
||||
})
|
||||
require.NoError(t, waitErr)
|
||||
require.NotEmpty(t, token)
|
||||
require.NoError(c.T(), waitErr)
|
||||
require.NotEmpty(c.T(), token)
|
||||
|
||||
// In order to test a cluster-scoped Argo CD Cluster Secret, we may optionally grant the ServiceAccount read-all permissions at cluster scope.
|
||||
if clusterScopedSecrets {
|
||||
clusterRole, clusterRoleBinding := generateReadOnlyClusterRoleandBindingForServiceAccount(username, username)
|
||||
clusterRole, clusterRoleBinding := generateReadOnlyClusterRoleandBindingForServiceAccount(c, username, serviceAccountName, ns.Name)
|
||||
|
||||
_, err := KubeClientset.RbacV1().ClusterRoles().Create(t.Context(), &clusterRole, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err := KubeClientset.RbacV1().ClusterRoles().Create(c.T().Context(), &clusterRole, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
_, err = KubeClientset.RbacV1().ClusterRoleBindings().Create(t.Context(), &clusterRoleBinding, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.RbacV1().ClusterRoleBindings().Create(c.T().Context(), &clusterRoleBinding, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
}
|
||||
|
||||
// Build the Argo CD Cluster Secret by using the service account token, and extracting needed values from kube config
|
||||
@@ -343,10 +363,10 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
}
|
||||
|
||||
jsonStringBytes, err := json.Marshal(clusterSecretConfigJSON)
|
||||
require.NoError(t, err)
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
_, apiURL, err := extractKubeConfigValues()
|
||||
require.NoError(t, err)
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
clusterResourcesField := ""
|
||||
namespacesField := ""
|
||||
@@ -358,13 +378,14 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
|
||||
// We create an Argo CD cluster Secret declaratively, using the K8s client, rather than via CLI, as the CLI doesn't currently
|
||||
// support Kubernetes API server URLs with query parameters.
|
||||
|
||||
secret := buildArgoCDClusterSecret("test-"+username, ArgoCDNamespace, E2ETestPrefix+"cluster-"+username, apiURL+"?user="+username,
|
||||
clusterName := DnsFriendly("test-"+username, "-"+c.ShortID())
|
||||
secret := buildArgoCDClusterSecret(clusterName, ArgoCDNamespace, clusterName, apiURL+"?user="+username,
|
||||
string(jsonStringBytes), clusterResourcesField, namespacesField)
|
||||
|
||||
// Finally, create the Cluster secret in the Argo CD E2E namespace
|
||||
_, err = KubeClientset.CoreV1().Secrets(secret.Namespace).Create(t.Context(), &secret, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.CoreV1().Secrets(secret.Namespace).Create(c.T().Context(), &secret, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
return ns.Name, serviceAccountName, clusterName
|
||||
}
|
||||
|
||||
// extractKubeConfigValues returns contents of the local environment's kubeconfig, using standard path resolution mechanism.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package project
|
||||
package account
|
||||
|
||||
import (
|
||||
"time"
|
||||
@@ -19,59 +19,59 @@ type Actions struct {
|
||||
}
|
||||
|
||||
func (a *Actions) prepareCanIGetLogsArgs() []string {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
return []string{
|
||||
"account", "can-i", "get", "logs", a.context.project + "/*",
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Actions) CanIGetLogs() *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli(a.prepareCanIGetLogsArgs()...)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) prepareSetPasswordArgs(account string) []string {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
return []string{
|
||||
"account", "update-password", "--account", account, "--current-password", fixture.AdminPassword, "--new-password", fixture.DefaultTestUserPassword,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Actions) Create() *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetAccounts(map[string][]string{
|
||||
a.context.name: {"login"},
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetAccounts(map[string][]string{
|
||||
a.context.GetName(): {"login"},
|
||||
}))
|
||||
_, _ = fixture.RunCli(a.prepareSetPasswordArgs(a.context.name)...)
|
||||
_, _ = fixture.RunCli(a.prepareSetPasswordArgs(a.context.GetName())...)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) SetPermissions(permissions []fixture.ACL, roleName string) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetPermissions(permissions, a.context.name, roleName))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetPermissions(permissions, a.context.GetName(), roleName))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) SetParamInSettingConfigMap(key, value string) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetParamInSettingConfigMap(key, value))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetParamInSettingConfigMap(key, value))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Login() *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.LoginAs(a.context.name))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.LoginAs(a.context.GetName()))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) runCli(args ...string) {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.lastOutput, a.lastError = fixture.RunCli(args...)
|
||||
}
|
||||
|
||||
func (a *Actions) Then() *Consequences {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
time.Sleep(fixture.WhenThenSleepInterval)
|
||||
return &Consequences{a.context, a}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package project
|
||||
package account
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -21,19 +21,19 @@ type Consequences struct {
|
||||
}
|
||||
|
||||
func (c *Consequences) And(block func(account *account.Account, err error)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block(c.get())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block(c.actions.lastOutput, c.actions.lastError)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Consequences) CurrentUser(block func(user *session.GetUserInfoResponse, err error)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block(c.getCurrentUser())
|
||||
return c
|
||||
}
|
||||
@@ -45,7 +45,7 @@ func (c *Consequences) get() (*account.Account, error) {
|
||||
return nil, err
|
||||
}
|
||||
for _, acc := range accList.Items {
|
||||
if acc.Name == c.context.name {
|
||||
if acc.Name == c.context.GetName() {
|
||||
return acc, nil
|
||||
}
|
||||
}
|
||||
@@ -53,9 +53,9 @@ func (c *Consequences) get() (*account.Account, error) {
|
||||
}
|
||||
|
||||
func (c *Consequences) getCurrentUser() (*session.GetUserInfoResponse, error) {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
closer, client, err := fixture.ArgoCDClientset.NewSessionClient()
|
||||
require.NoError(c.context.t, err)
|
||||
require.NoError(c.context.T(), err)
|
||||
defer utilio.Close(closer)
|
||||
return client.GetUserInfo(context.Background(), &session.GetUserInfoRequest{})
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package project
|
||||
package account
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@@ -7,18 +7,25 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
|
||||
)
|
||||
|
||||
// this implements the "given" part of given/when/then
|
||||
// Context implements the "given" part of given/when/then.
|
||||
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
|
||||
type Context struct {
|
||||
t *testing.T
|
||||
// seconds
|
||||
name string
|
||||
*fixture.TestState
|
||||
|
||||
project string
|
||||
}
|
||||
|
||||
func Given(t *testing.T) *Context {
|
||||
t.Helper()
|
||||
fixture.EnsureCleanState(t)
|
||||
return &Context{t: t, name: fixture.Name()}
|
||||
state := fixture.EnsureCleanState(t)
|
||||
return &Context{TestState: state}
|
||||
}
|
||||
|
||||
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
|
||||
// Use this when you need multiple fixture contexts within the same test.
|
||||
func GivenWithSameState(ctx fixture.TestContext) *Context {
|
||||
ctx.T().Helper()
|
||||
return &Context{TestState: fixture.NewTestStateFromContext(ctx)}
|
||||
}
|
||||
|
||||
func (c *Context) Project(project string) *Context {
|
||||
@@ -26,12 +33,8 @@ func (c *Context) Project(project string) *Context {
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) GetName() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
func (c *Context) Name(name string) *Context {
|
||||
c.name = name
|
||||
c.SetName(name)
|
||||
return c
|
||||
}
|
||||
|
||||
|
||||
@@ -17,43 +17,43 @@ type Actions struct {
|
||||
}
|
||||
|
||||
func (a *Actions) prepareExportCommand() []string {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args := []string{"export", "--application-namespaces", fixture.AppNamespace()}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func (a *Actions) prepareImportCommand() []string {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args := []string{"import", "--application-namespaces", fixture.AppNamespace(), "-"}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func (a *Actions) RunExport() *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli(a.prepareExportCommand()...)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) RunImport(stdin string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCliWithStdin(stdin, a.prepareImportCommand()...)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) runCli(args ...string) {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.lastOutput, a.lastError = RunCli(args...)
|
||||
}
|
||||
|
||||
func (a *Actions) runCliWithStdin(stdin string, args ...string) {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.lastOutput, a.lastError = RunCliWithStdin(stdin, args...)
|
||||
}
|
||||
|
||||
func (a *Actions) Then() *Consequences {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
time.Sleep(fixture.WhenThenSleepInterval)
|
||||
return &Consequences{a.context, a}
|
||||
}
|
||||
|
||||
@@ -14,13 +14,13 @@ type Consequences struct {
|
||||
}
|
||||
|
||||
func (c *Consequences) And(block func()) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block()
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block(c.actions.lastOutput, c.actions.lastError)
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -7,20 +7,23 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
|
||||
)
|
||||
|
||||
// this implements the "given" part of given/when/then
|
||||
// Context implements the "given" part of given/when/then.
|
||||
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
|
||||
type Context struct {
|
||||
t *testing.T
|
||||
*fixture.TestState
|
||||
}
|
||||
|
||||
func Given(t *testing.T) *Context {
|
||||
t.Helper()
|
||||
fixture.EnsureCleanState(t)
|
||||
return GivenWithSameState(t)
|
||||
state := fixture.EnsureCleanState(t)
|
||||
return &Context{TestState: state}
|
||||
}
|
||||
|
||||
func GivenWithSameState(t *testing.T) *Context {
|
||||
t.Helper()
|
||||
return &Context{t}
|
||||
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
|
||||
// Use this when you need multiple fixture contexts within the same test.
|
||||
func GivenWithSameState(ctx fixture.TestContext) *Context {
|
||||
ctx.T().Helper()
|
||||
return &Context{TestState: fixture.NewTestStateFromContext(ctx)}
|
||||
}
|
||||
|
||||
func (c *Context) And(block func()) *Context {
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -41,71 +44,71 @@ func (a *Actions) DoNotIgnoreErrors() *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) PatchFile(file string, jsonPatch string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.Patch(a.context.t, a.context.path+"/"+file, jsonPatch)
|
||||
a.context.T().Helper()
|
||||
fixture.Patch(a.context.T(), a.context.path+"/"+file, jsonPatch)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) DeleteFile(file string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.Delete(a.context.t, a.context.path+"/"+file)
|
||||
a.context.T().Helper()
|
||||
fixture.Delete(a.context.T(), a.context.path+"/"+file)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) WriteFile(fileName, fileContents string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.WriteFile(a.context.t, a.context.path+"/"+fileName, fileContents)
|
||||
a.context.T().Helper()
|
||||
fixture.WriteFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddFile(fileName, fileContents string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.AddFile(a.context.t, a.context.path+"/"+fileName, fileContents)
|
||||
a.context.T().Helper()
|
||||
fixture.AddFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddSignedFile(fileName, fileContents string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.AddSignedFile(a.context.t, a.context.path+"/"+fileName, fileContents)
|
||||
a.context.T().Helper()
|
||||
fixture.AddSignedFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddSignedTag(name string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.AddSignedTag(a.context.t, name)
|
||||
a.context.T().Helper()
|
||||
fixture.AddSignedTag(a.context.T(), name)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddTag(name string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.AddTag(a.context.t, name)
|
||||
a.context.T().Helper()
|
||||
fixture.AddTag(a.context.T(), name)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddAnnotatedTag(name string, message string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.AddAnnotatedTag(a.context.t, name, message)
|
||||
a.context.T().Helper()
|
||||
fixture.AddAnnotatedTag(a.context.T(), name, message)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AddTagWithForce(name string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.AddTagWithForce(a.context.t, name)
|
||||
a.context.T().Helper()
|
||||
fixture.AddTagWithForce(a.context.T(), name)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) RemoveSubmodule() *Actions {
|
||||
a.context.t.Helper()
|
||||
fixture.RemoveSubmodule(a.context.t)
|
||||
a.context.T().Helper()
|
||||
fixture.RemoveSubmodule(a.context.T())
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) CreateFromPartialFile(data string, flags ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
tmpFile, err := os.CreateTemp("", "")
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
_, err = tmpFile.WriteString(data)
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
|
||||
args := append([]string{
|
||||
"app", "create",
|
||||
@@ -113,7 +116,7 @@ func (a *Actions) CreateFromPartialFile(data string, flags ...string) *Actions {
|
||||
"--name", a.context.AppName(),
|
||||
"--repo", fixture.RepoURL(a.context.repoURLType),
|
||||
"--dest-server", a.context.destServer,
|
||||
"--dest-namespace", fixture.DeploymentNamespace(),
|
||||
"--dest-namespace", a.context.DeploymentNamespace(),
|
||||
}, flags...)
|
||||
if a.context.appNamespace != "" {
|
||||
args = append(args, "--app-namespace", a.context.appNamespace)
|
||||
@@ -124,7 +127,7 @@ func (a *Actions) CreateFromPartialFile(data string, flags ...string) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: a.context.AppName(),
|
||||
@@ -138,7 +141,7 @@ func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags
|
||||
},
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: a.context.destServer,
|
||||
Namespace: fixture.DeploymentNamespace(),
|
||||
Namespace: a.context.DeploymentNamespace(),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -167,9 +170,9 @@ func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags
|
||||
handler(app)
|
||||
data := grpc.MustMarshal(app)
|
||||
tmpFile, err := os.CreateTemp("", "")
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
_, err = tmpFile.Write(data)
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
|
||||
args := append([]string{
|
||||
"app", "create",
|
||||
@@ -181,7 +184,7 @@ func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags
|
||||
}
|
||||
|
||||
func (a *Actions) CreateMultiSourceAppFromFile(flags ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: a.context.AppName(),
|
||||
@@ -192,7 +195,7 @@ func (a *Actions) CreateMultiSourceAppFromFile(flags ...string) *Actions {
|
||||
Sources: a.context.sources,
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: a.context.destServer,
|
||||
Namespace: fixture.DeploymentNamespace(),
|
||||
Namespace: a.context.DeploymentNamespace(),
|
||||
},
|
||||
SyncPolicy: &v1alpha1.SyncPolicy{
|
||||
Automated: &v1alpha1.SyncPolicyAutomated{
|
||||
@@ -204,9 +207,9 @@ func (a *Actions) CreateMultiSourceAppFromFile(flags ...string) *Actions {
|
||||
|
||||
data := grpc.MustMarshal(app)
|
||||
tmpFile, err := os.CreateTemp("", "")
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
_, err = tmpFile.Write(data)
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
|
||||
args := append([]string{
|
||||
"app", "create",
|
||||
@@ -226,7 +229,7 @@ func (a *Actions) CreateWithNoNameSpace(args ...string) *Actions {
|
||||
|
||||
func (a *Actions) CreateApp(args ...string) *Actions {
|
||||
args = a.prepareCreateAppArgs(args)
|
||||
args = append(args, "--dest-namespace", fixture.DeploymentNamespace())
|
||||
args = append(args, "--dest-namespace", a.context.DeploymentNamespace())
|
||||
|
||||
// are you adding new context values? if you only use them for this func, then use args instead
|
||||
a.runCli(args...)
|
||||
@@ -235,7 +238,7 @@ func (a *Actions) CreateApp(args ...string) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) prepareCreateAppArgs(args []string) []string {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args = append([]string{
|
||||
"app", "create", a.context.AppQualifiedName(),
|
||||
}, args...)
|
||||
@@ -326,33 +329,33 @@ func (a *Actions) prepareCreateAppArgs(args []string) []string {
|
||||
}
|
||||
|
||||
func (a *Actions) Declarative(filename string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
return a.DeclarativeWithCustomRepo(filename, fixture.RepoURL(a.context.repoURLType))
|
||||
}
|
||||
|
||||
func (a *Actions) DeclarativeWithCustomRepo(filename string, repoURL string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
values := map[string]any{
|
||||
"ArgoCDNamespace": fixture.TestNamespace(),
|
||||
"DeploymentNamespace": fixture.DeploymentNamespace(),
|
||||
"DeploymentNamespace": a.context.DeploymentNamespace(),
|
||||
"Name": a.context.AppName(),
|
||||
"Path": a.context.path,
|
||||
"Project": a.context.project,
|
||||
"RepoURL": repoURL,
|
||||
}
|
||||
a.lastOutput, a.lastError = fixture.Declarative(a.context.t, filename, values)
|
||||
a.lastOutput, a.lastError = fixture.Declarative(a.context.T(), filename, values)
|
||||
a.verifyAction()
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) PatchApp(patch string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli("app", "patch", a.context.AppQualifiedName(), "--patch", patch)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) PatchAppHttp(patch string) *Actions { //nolint:revive //FIXME(var-naming)
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
var application v1alpha1.Application
|
||||
patchType := "merge"
|
||||
appName := a.context.AppQualifiedName()
|
||||
@@ -364,17 +367,17 @@ func (a *Actions) PatchAppHttp(patch string) *Actions { //nolint:revive //FIXME(
|
||||
AppNamespace: &appNamespace,
|
||||
}
|
||||
jsonBytes, err := json.MarshalIndent(patchRequest, "", " ")
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
err = fixture.DoHttpJsonRequest("PATCH",
|
||||
fmt.Sprintf("/api/v1/applications/%v", appName),
|
||||
&application,
|
||||
jsonBytes...)
|
||||
require.NoError(a.context.t, err)
|
||||
require.NoError(a.context.T(), err)
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) AppSet(flags ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args := []string{"app", "set", a.context.AppQualifiedName()}
|
||||
args = append(args, flags...)
|
||||
a.runCli(args...)
|
||||
@@ -382,7 +385,7 @@ func (a *Actions) AppSet(flags ...string) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) AppUnSet(flags ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args := []string{"app", "unset", a.context.AppQualifiedName()}
|
||||
args = append(args, flags...)
|
||||
a.runCli(args...)
|
||||
@@ -390,9 +393,9 @@ func (a *Actions) AppUnSet(flags ...string) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) Sync(args ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args = append([]string{"app", "sync"}, args...)
|
||||
if a.context.name != "" {
|
||||
if a.context.GetName() != "" {
|
||||
args = append(args, a.context.AppQualifiedName())
|
||||
}
|
||||
args = append(args, "--timeout", strconv.Itoa(a.context.timeout))
|
||||
@@ -436,21 +439,25 @@ func (a *Actions) Sync(args ...string) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) ConfirmDeletion() *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
|
||||
a.runCli("app", "confirm-deletion", a.context.AppQualifiedName())
|
||||
|
||||
// Always sleep more than a second after the confirmation so the timestamp
|
||||
// is not valid for immediate subsequent operations
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) TerminateOp() *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli("app", "terminate-op", a.context.AppQualifiedName())
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Refresh(refreshType v1alpha1.RefreshType) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
flag := map[v1alpha1.RefreshType]string{
|
||||
v1alpha1.RefreshTypeNormal: "--refresh",
|
||||
v1alpha1.RefreshTypeHard: "--hard-refresh",
|
||||
@@ -462,33 +469,33 @@ func (a *Actions) Refresh(refreshType v1alpha1.RefreshType) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) Get() *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli("app", "get", a.context.AppQualifiedName())
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Delete(cascade bool) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli("app", "delete", a.context.AppQualifiedName(), fmt.Sprintf("--cascade=%v", cascade), "--yes")
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) DeleteBySelector(selector string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli("app", "delete", "--selector="+selector, "--yes")
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) DeleteBySelectorWithWait(selector string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.runCli("app", "delete", "--selector="+selector, "--yes", "--wait")
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Wait(args ...string) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
args = append([]string{"app", "wait"}, args...)
|
||||
if a.context.name != "" {
|
||||
if a.context.GetName() != "" {
|
||||
args = append(args, a.context.AppQualifiedName())
|
||||
}
|
||||
args = append(args, "--timeout", strconv.Itoa(a.context.timeout))
|
||||
@@ -497,65 +504,111 @@ func (a *Actions) Wait(args ...string) *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) SetParamInSettingConfigMap(key, value string) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetParamInSettingConfigMap(key, value))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetParamInSettingConfigMap(key, value))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) And(block func()) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
block()
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Then() *Consequences {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
return &Consequences{a.context, a, 15}
|
||||
}
|
||||
|
||||
func (a *Actions) runCli(args ...string) {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
a.lastOutput, a.lastError = fixture.RunCli(args...)
|
||||
a.verifyAction()
|
||||
}
|
||||
|
||||
func (a *Actions) verifyAction() {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
if !a.ignoreErrors {
|
||||
a.Then().Expect(Success(""))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Actions) SetTrackingMethod(trackingMethod string) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetTrackingMethod(trackingMethod))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetTrackingMethod(trackingMethod))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) SetInstallationID(installationID string) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetInstallationID(installationID))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetInstallationID(installationID))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) SetTrackingLabel(trackingLabel string) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetTrackingLabel(trackingLabel))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetTrackingLabel(trackingLabel))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) WithImpersonationEnabled(serviceAccountName string, policyRules []rbacv1.PolicyRule) *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetImpersonationEnabled("true"))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetImpersonationEnabled("true"))
|
||||
if serviceAccountName == "" || policyRules == nil {
|
||||
return a
|
||||
}
|
||||
require.NoError(a.context.t, fixture.CreateRBACResourcesForImpersonation(serviceAccountName, policyRules))
|
||||
require.NoError(a.context.T(), createRBACResourcesForImpersonation(a.context.DeploymentNamespace(), serviceAccountName, policyRules))
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) WithImpersonationDisabled() *Actions {
|
||||
a.context.t.Helper()
|
||||
require.NoError(a.context.t, fixture.SetImpersonationEnabled("false"))
|
||||
a.context.T().Helper()
|
||||
require.NoError(a.context.T(), fixture.SetImpersonationEnabled("false"))
|
||||
return a
|
||||
}
|
||||
|
||||
// TODO: Ensure service account name and other resources have unique names based on the test context
|
||||
// TODO: This function should be moved to the project context since impersonation is a project concept, not application.
|
||||
func createRBACResourcesForImpersonation(namespace string, serviceAccountName string, policyRules []rbacv1.PolicyRule) error {
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
},
|
||||
}
|
||||
_, err := fixture.KubeClientset.CoreV1().ServiceAccounts(namespace).Create(context.Background(), sa, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
role := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", serviceAccountName, "role"),
|
||||
},
|
||||
Rules: policyRules,
|
||||
}
|
||||
_, err = fixture.KubeClientset.RbacV1().Roles(namespace).Create(context.Background(), role, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rolebinding := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", serviceAccountName, "rolebinding"),
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "Role",
|
||||
Name: fmt.Sprintf("%s-%s", serviceAccountName, "role"),
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: serviceAccountName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = fixture.KubeClientset.RbacV1().RoleBindings(namespace).Create(context.Background(), rolebinding, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ type Consequences struct {
|
||||
|
||||
func (c *Consequences) Expect(e Expectation) *Consequences {
|
||||
// this invocation makes sure this func is not reported as the cause of the failure - we are a "test helper"
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
var message string
|
||||
var state state
|
||||
sleepIntervals := []time.Duration{
|
||||
@@ -50,19 +50,19 @@ func (c *Consequences) Expect(e Expectation) *Consequences {
|
||||
log.Infof("expectation succeeded: %s", message)
|
||||
return c
|
||||
case failed:
|
||||
c.context.t.Fatalf("failed expectation: %s", message)
|
||||
c.context.T().Fatalf("failed expectation: %s", message)
|
||||
return c
|
||||
}
|
||||
log.Infof("pending: %s", message)
|
||||
}
|
||||
c.context.t.Fatal("timeout waiting for: " + message)
|
||||
c.context.T().Fatal("timeout waiting for: " + message)
|
||||
return c
|
||||
}
|
||||
|
||||
// ExpectConsistently will continuously evaluate a condition, and it must be true each time it is evaluated, otherwise the test is failed. The condition will be repeatedly evaluated until 'expirationDuration' is met, waiting 'waitDuration' after each success.
|
||||
func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Duration, expirationDuration time.Duration) *Consequences {
|
||||
// this invocation makes sure this func is not reported as the cause of the failure - we are a "test helper"
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
|
||||
expiration := time.Now().Add(expirationDuration)
|
||||
for time.Now().Before(expiration) {
|
||||
@@ -71,7 +71,7 @@ func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Durat
|
||||
case succeeded:
|
||||
log.Infof("expectation succeeded: %s", message)
|
||||
case failed:
|
||||
c.context.t.Fatalf("failed expectation: %s", message)
|
||||
c.context.T().Fatalf("failed expectation: %s", message)
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -85,13 +85,13 @@ func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Durat
|
||||
}
|
||||
|
||||
func (c *Consequences) And(block func(app *v1alpha1.Application)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block(c.app())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Consequences) AndAction(block func()) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block()
|
||||
return c
|
||||
}
|
||||
@@ -106,9 +106,9 @@ func (c *Consequences) When() *Actions {
|
||||
}
|
||||
|
||||
func (c *Consequences) app() *v1alpha1.Application {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
app, err := c.get()
|
||||
require.NoError(c.context.t, err)
|
||||
require.NoError(c.context.T(), err)
|
||||
return app
|
||||
}
|
||||
|
||||
@@ -117,16 +117,16 @@ func (c *Consequences) get() (*v1alpha1.Application, error) {
|
||||
}
|
||||
|
||||
func (c *Consequences) resource(kind, name, namespace string) v1alpha1.ResourceStatus {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
closer, client, err := fixture.ArgoCDClientset.NewApplicationClient()
|
||||
require.NoError(c.context.t, err)
|
||||
require.NoError(c.context.T(), err)
|
||||
defer utilio.Close(closer)
|
||||
app, err := client.Get(context.Background(), &applicationpkg.ApplicationQuery{
|
||||
Name: ptr.To(c.context.AppName()),
|
||||
Projects: []string{c.context.project},
|
||||
AppNamespace: ptr.To(c.context.appNamespace),
|
||||
})
|
||||
require.NoError(c.context.t, err)
|
||||
require.NoError(c.context.T(), err)
|
||||
for _, r := range app.Status.Resources {
|
||||
if r.Kind == kind && r.Name == name && (namespace == "" || namespace == r.Namespace) {
|
||||
return r
|
||||
@@ -141,7 +141,7 @@ func (c *Consequences) resource(kind, name, namespace string) v1alpha1.ResourceS
|
||||
}
|
||||
|
||||
func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
|
||||
c.context.t.Helper()
|
||||
c.context.T().Helper()
|
||||
block(c.actions.lastOutput, c.actions.lastError)
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -15,17 +16,18 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
// Context implements the "given" part of given/when/then
|
||||
// Context implements the "given" part of given/when/then.
|
||||
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
|
||||
type Context struct {
|
||||
t *testing.T
|
||||
*fixture.TestState
|
||||
path string
|
||||
chart string
|
||||
ociRegistry string
|
||||
ociRegistryPath string
|
||||
repoURLType fixture.RepoURLType
|
||||
// seconds
|
||||
timeout int
|
||||
name string
|
||||
timeout int
|
||||
|
||||
appNamespace string
|
||||
destServer string
|
||||
destName string
|
||||
@@ -64,8 +66,8 @@ type ContextArgs struct {
|
||||
|
||||
func Given(t *testing.T, opts ...fixture.TestOption) *Context {
|
||||
t.Helper()
|
||||
fixture.EnsureCleanState(t, opts...)
|
||||
return GivenWithSameState(t)
|
||||
state := fixture.EnsureCleanState(t, opts...)
|
||||
return GivenWithSameState(state)
|
||||
}
|
||||
|
||||
func GivenWithNamespace(t *testing.T, namespace string) *Context {
|
||||
@@ -75,17 +77,18 @@ func GivenWithNamespace(t *testing.T, namespace string) *Context {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func GivenWithSameState(t *testing.T) *Context {
|
||||
t.Helper()
|
||||
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
|
||||
// Use this when you need multiple fixture contexts within the same test.
|
||||
func GivenWithSameState(ctx fixture.TestContext) *Context {
|
||||
ctx.T().Helper()
|
||||
// ARGOCD_E2E_DEFAULT_TIMEOUT can be used to override the default timeout
|
||||
// for any context.
|
||||
timeout := env.ParseNumFromEnv("ARGOCD_E2E_DEFAULT_TIMEOUT", 20, 0, 180)
|
||||
return &Context{
|
||||
t: t,
|
||||
TestState: fixture.NewTestStateFromContext(ctx),
|
||||
destServer: v1alpha1.KubernetesInternalAPIServerAddr,
|
||||
destName: "in-cluster",
|
||||
repoURLType: fixture.RepoURLTypeFile,
|
||||
name: fixture.Name(),
|
||||
timeout: timeout,
|
||||
project: "default",
|
||||
prune: true,
|
||||
@@ -93,8 +96,16 @@ func GivenWithSameState(t *testing.T) *Context {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Context) Name(name string) *Context {
|
||||
c.SetName(name)
|
||||
return c
|
||||
}
|
||||
|
||||
// AppName returns the unique application name for the test context.
|
||||
// Unique application names protects from potential conflicts between test run
|
||||
// caused by the tracking annotation on existing objects
|
||||
func (c *Context) AppName() string {
|
||||
return c.name
|
||||
return c.GetName()
|
||||
}
|
||||
|
||||
func (c *Context) AppQualifiedName() string {
|
||||
@@ -118,129 +129,134 @@ func (c *Context) SetAppNamespace(namespace string) *Context {
|
||||
}
|
||||
|
||||
func (c *Context) GPGPublicKeyAdded() *Context {
|
||||
gpgkeys.AddGPGPublicKey(c.t)
|
||||
gpgkeys.AddGPGPublicKey(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) GPGPublicKeyRemoved() *Context {
|
||||
gpgkeys.DeleteGPGPublicKey(c.t)
|
||||
gpgkeys.DeleteGPGPublicKey(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) CustomCACertAdded() *Context {
|
||||
certs.AddCustomCACert(c.t)
|
||||
certs.AddCustomCACert(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) CustomSSHKnownHostsAdded() *Context {
|
||||
certs.AddCustomSSHKnownHostsKeys(c.t)
|
||||
certs.AddCustomSSHKnownHostsKeys(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HTTPSRepoURLAdded(withCreds bool, opts ...repos.AddRepoOpts) *Context {
|
||||
repos.AddHTTPSRepo(c.t, false, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
|
||||
repos.AddHTTPSRepo(c.T(), false, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HTTPSInsecureRepoURLAdded(withCreds bool, opts ...repos.AddRepoOpts) *Context {
|
||||
repos.AddHTTPSRepo(c.t, true, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
|
||||
repos.AddHTTPSRepo(c.T(), true, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HTTPSInsecureRepoURLWithClientCertAdded() *Context {
|
||||
repos.AddHTTPSRepoClientCert(c.t, true)
|
||||
repos.AddHTTPSRepoClientCert(c.T(), true)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HTTPSRepoURLWithClientCertAdded() *Context {
|
||||
repos.AddHTTPSRepoClientCert(c.t, false)
|
||||
repos.AddHTTPSRepoClientCert(c.T(), false)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) SubmoduleHTTPSRepoURLAdded(withCreds bool) *Context {
|
||||
fixture.CreateSubmoduleRepos(c.t, "https")
|
||||
repos.AddHTTPSRepo(c.t, false, withCreds, "", fixture.RepoURLTypeHTTPSSubmoduleParent)
|
||||
fixture.CreateSubmoduleRepos(c.T(), "https")
|
||||
repos.AddHTTPSRepo(c.T(), false, withCreds, "", fixture.RepoURLTypeHTTPSSubmoduleParent)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) WriteCredentials(insecure bool) *Context {
|
||||
repos.AddWriteCredentials(c.T(), c.GetName(), insecure, c.repoURLType)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) SSHRepoURLAdded(withCreds bool) *Context {
|
||||
repos.AddSSHRepo(c.t, false, withCreds, fixture.RepoURLTypeSSH)
|
||||
repos.AddSSHRepo(c.T(), false, withCreds, fixture.RepoURLTypeSSH)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) SSHInsecureRepoURLAdded(withCreds bool) *Context {
|
||||
repos.AddSSHRepo(c.t, true, withCreds, fixture.RepoURLTypeSSH)
|
||||
repos.AddSSHRepo(c.T(), true, withCreds, fixture.RepoURLTypeSSH)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) SubmoduleSSHRepoURLAdded(withCreds bool) *Context {
|
||||
fixture.CreateSubmoduleRepos(c.t, "ssh")
|
||||
repos.AddSSHRepo(c.t, false, withCreds, fixture.RepoURLTypeSSHSubmoduleParent)
|
||||
fixture.CreateSubmoduleRepos(c.T(), "ssh")
|
||||
repos.AddSSHRepo(c.T(), false, withCreds, fixture.RepoURLTypeSSHSubmoduleParent)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HelmRepoAdded(name string) *Context {
|
||||
repos.AddHelmRepo(c.t, name)
|
||||
repos.AddHelmRepo(c.T(), name)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HelmOCIRepoAdded(name string) *Context {
|
||||
repos.AddHelmOCIRepo(c.t, name)
|
||||
repos.AddHelmOCIRepo(c.T(), name)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) PushImageToOCIRegistry(pathName, tag string) *Context {
|
||||
repos.PushImageToOCIRegistry(c.t, pathName, tag)
|
||||
repos.PushImageToOCIRegistry(c.T(), pathName, tag)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) PushImageToAuthenticatedOCIRegistry(pathName, tag string) *Context {
|
||||
repos.PushImageToAuthenticatedOCIRegistry(c.t, pathName, tag)
|
||||
repos.PushImageToAuthenticatedOCIRegistry(c.T(), pathName, tag)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) PushChartToOCIRegistry(chartPathName, chartName, chartVersion string) *Context {
|
||||
repos.PushChartToOCIRegistry(c.t, chartPathName, chartName, chartVersion)
|
||||
repos.PushChartToOCIRegistry(c.T(), chartPathName, chartName, chartVersion)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) PushChartToAuthenticatedOCIRegistry(chartPathName, chartName, chartVersion string) *Context {
|
||||
repos.PushChartToAuthenticatedOCIRegistry(c.t, chartPathName, chartName, chartVersion)
|
||||
repos.PushChartToAuthenticatedOCIRegistry(c.T(), chartPathName, chartName, chartVersion)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HTTPSCredentialsUserPassAdded() *Context {
|
||||
repos.AddHTTPSCredentialsUserPass(c.t)
|
||||
repos.AddHTTPSCredentialsUserPass(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HelmHTTPSCredentialsUserPassAdded() *Context {
|
||||
repos.AddHelmHTTPSCredentialsTLSClientCert(c.t)
|
||||
repos.AddHelmHTTPSCredentialsTLSClientCert(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HelmoOCICredentialsWithoutUserPassAdded() *Context {
|
||||
repos.AddHelmoOCICredentialsWithoutUserPass(c.t)
|
||||
repos.AddHelmoOCICredentialsWithoutUserPass(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) HTTPSCredentialsTLSClientCertAdded() *Context {
|
||||
repos.AddHTTPSCredentialsTLSClientCert(c.t)
|
||||
repos.AddHTTPSCredentialsTLSClientCert(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) SSHCredentialsAdded() *Context {
|
||||
repos.AddSSHCredentials(c.t)
|
||||
repos.AddSSHCredentials(c.T())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) OCIRepoAdded(name, imagePath string) *Context {
|
||||
repos.AddOCIRepo(c.t, name, imagePath)
|
||||
repos.AddOCIRepo(c.T(), name, imagePath)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) AuthenticatedOCIRepoAdded(name, imagePath string) *Context {
|
||||
repos.AddAuthenticatedOCIRepo(c.t, name, imagePath)
|
||||
repos.AddAuthenticatedOCIRepo(c.T(), name, imagePath)
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -250,8 +266,8 @@ func (c *Context) OCIRegistry(registry string) *Context {
|
||||
}
|
||||
|
||||
func (c *Context) ProjectSpec(spec v1alpha1.AppProjectSpec) *Context {
|
||||
c.t.Helper()
|
||||
require.NoError(c.t, fixture.SetProjectSpec(c.project, spec))
|
||||
c.T().Helper()
|
||||
require.NoError(c.T(), fixture.SetProjectSpec(c.project, spec))
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -265,15 +281,6 @@ func (c *Context) RepoURLType(urlType fixture.RepoURLType) *Context {
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) GetName() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
func (c *Context) Name(name string) *Context {
|
||||
c.name = name
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) Path(path string) *Context {
|
||||
c.path = path
|
||||
return c
|
||||
@@ -336,6 +343,10 @@ func (c *Context) DestServer(destServer string) *Context {
|
||||
}
|
||||
|
||||
func (c *Context) DestName(destName string) *Context {
|
||||
if destName != "in-cluster" {
|
||||
suffix := "-" + c.ShortID()
|
||||
destName = fixture.DnsFriendly(strings.TrimSuffix(destName, suffix), suffix)
|
||||
}
|
||||
c.destName = destName
|
||||
c.isDestServerInferred = true
|
||||
return c
|
||||
@@ -368,14 +379,14 @@ func (c *Context) NameSuffix(nameSuffix string) *Context {
|
||||
}
|
||||
|
||||
func (c *Context) ResourceOverrides(overrides map[string]v1alpha1.ResourceOverride) *Context {
|
||||
c.t.Helper()
|
||||
require.NoError(c.t, fixture.SetResourceOverrides(overrides))
|
||||
c.T().Helper()
|
||||
require.NoError(c.T(), fixture.SetResourceOverrides(overrides))
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) ResourceFilter(filter settings.ResourcesFilter) *Context {
|
||||
c.t.Helper()
|
||||
require.NoError(c.t, fixture.SetResourceFilter(filter))
|
||||
c.T().Helper()
|
||||
require.NoError(c.T(), fixture.SetResourceFilter(filter))
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -445,14 +456,14 @@ func (c *Context) HelmSkipTests() *Context {
|
||||
}
|
||||
|
||||
func (c *Context) SetTrackingMethod(trackingMethod string) *Context {
|
||||
c.t.Helper()
|
||||
require.NoError(c.t, fixture.SetTrackingMethod(trackingMethod))
|
||||
c.T().Helper()
|
||||
require.NoError(c.T(), fixture.SetTrackingMethod(trackingMethod))
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Context) SetInstallationID(installationID string) *Context {
|
||||
c.t.Helper()
|
||||
require.NoError(c.t, fixture.SetInstallationID(installationID))
|
||||
c.T().Helper()
|
||||
require.NoError(c.T(), fixture.SetInstallationID(installationID))
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -466,7 +477,7 @@ func (c *Context) Sources(sources []v1alpha1.ApplicationSource) *Context {
|
||||
}
|
||||
|
||||
func (c *Context) RegisterKustomizeVersion(version, path string) *Context {
|
||||
c.t.Helper()
|
||||
require.NoError(c.t, fixture.RegisterKustomizeVersion(version, path))
|
||||
c.T().Helper()
|
||||
require.NoError(c.T(), fixture.RegisterKustomizeVersion(version, path))
|
||||
return c
|
||||
}
|
||||
|
||||
87
test/e2e/fixture/app/context_cmp.go
Normal file
87
test/e2e/fixture/app/context_cmp.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmpserver/plugin"
|
||||
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
)
|
||||
|
||||
// RunningCMPServer starts a CMP server with the given config directory and waits for it to be ready.
|
||||
// It blocks until the CMP socket is created or times out after 10 seconds.
|
||||
func (c *Context) RunningCMPServer(configFile string) *Context {
|
||||
c.T().Helper()
|
||||
startCMPServer(c.T(), configFile)
|
||||
c.T().Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
return c
|
||||
}
|
||||
|
||||
// startCMPServer starts the CMP server and waits for its socket to be ready.
|
||||
// It blocks until the socket file is created or times out after 10 seconds.
|
||||
func startCMPServer(t *testing.T, configDir string) {
|
||||
t.Helper()
|
||||
pluginSockFilePath := path.Join(fixture.TmpDir(), fixture.PluginSockFilePath)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd-cmp-server")
|
||||
// ARGOCD_PLUGINSOCKFILEPATH should be set as the same value as repo server env var
|
||||
t.Setenv("ARGOCD_PLUGINSOCKFILEPATH", pluginSockFilePath)
|
||||
if _, err := os.Stat(pluginSockFilePath); os.IsNotExist(err) {
|
||||
err := os.Mkdir(pluginSockFilePath, 0o700)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Read plugin config to get expected socket path
|
||||
cfg, err := plugin.ReadPluginConfig(configDir)
|
||||
require.NoError(t, err, "failed to read plugin config from %s", configDir)
|
||||
expectedSocket := cfg.Address()
|
||||
|
||||
// Remove stale socket if it exists from a previous test run
|
||||
if err := os.Remove(expectedSocket); err != nil && !os.IsNotExist(err) {
|
||||
require.NoError(t, err, "failed to remove stale socket")
|
||||
}
|
||||
|
||||
// Start CMP server in goroutine (non-blocking)
|
||||
go func() {
|
||||
errors.NewHandler(t).FailOnErr(fixture.RunWithStdin("", "", "../../dist/argocd", "--config-dir-path", configDir))
|
||||
}()
|
||||
|
||||
// Wait for socket to be created
|
||||
waitForSocket(t, expectedSocket, 10*time.Second)
|
||||
}
|
||||
|
||||
// waitForSocket polls for a socket file to exist with exponential backoff
|
||||
func waitForSocket(t *testing.T, socketPath string, timeout time.Duration) {
|
||||
t.Helper()
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
sleepIntervals := []time.Duration{
|
||||
10 * time.Millisecond,
|
||||
20 * time.Millisecond,
|
||||
50 * time.Millisecond,
|
||||
100 * time.Millisecond,
|
||||
200 * time.Millisecond,
|
||||
500 * time.Millisecond,
|
||||
}
|
||||
sleepIdx := 0
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
if info, err := os.Stat(socketPath); err == nil {
|
||||
if info.Mode()&os.ModeSocket != 0 {
|
||||
return // Socket exists and is a socket!
|
||||
}
|
||||
}
|
||||
if sleepIdx < len(sleepIntervals) {
|
||||
time.Sleep(sleepIntervals[sleepIdx])
|
||||
sleepIdx++
|
||||
} else {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
t.Fatalf("CMP socket %s did not appear within %v", socketPath, timeout)
|
||||
}
|
||||
@@ -272,9 +272,19 @@ func DoesNotExistNow() Expectation {
|
||||
}
|
||||
}
|
||||
|
||||
func App(predicate func(app *v1alpha1.Application) bool) Expectation {
|
||||
return func(c *Consequences) (state, string) {
|
||||
app := c.app().DeepCopy()
|
||||
if predicate(app) {
|
||||
return succeeded, "app predicate matches"
|
||||
}
|
||||
return pending, "app predicate does not match"
|
||||
}
|
||||
}
|
||||
|
||||
func Pod(predicate func(p corev1.Pod) bool) Expectation {
|
||||
return func(_ *Consequences) (state, string) {
|
||||
pods, err := pods()
|
||||
return func(c *Consequences) (state, string) {
|
||||
pods, err := pods(c.context.DeploymentNamespace())
|
||||
if err != nil {
|
||||
return failed, err.Error()
|
||||
}
|
||||
@@ -288,8 +298,8 @@ func Pod(predicate func(p corev1.Pod) bool) Expectation {
|
||||
}
|
||||
|
||||
func NotPod(predicate func(p corev1.Pod) bool) Expectation {
|
||||
return func(_ *Consequences) (state, string) {
|
||||
pods, err := pods()
|
||||
return func(c *Consequences) (state, string) {
|
||||
pods, err := pods(c.context.DeploymentNamespace())
|
||||
if err != nil {
|
||||
return failed, err.Error()
|
||||
}
|
||||
@@ -302,9 +312,8 @@ func NotPod(predicate func(p corev1.Pod) bool) Expectation {
|
||||
}
|
||||
}
|
||||
|
||||
func pods() (*corev1.PodList, error) {
|
||||
fixture.KubeClientset.CoreV1()
|
||||
pods, err := fixture.KubeClientset.CoreV1().Pods(fixture.DeploymentNamespace()).List(context.Background(), metav1.ListOptions{})
|
||||
func pods(namespace string) (*corev1.PodList, error) {
|
||||
pods, err := fixture.KubeClientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
|
||||
return pods, err
|
||||
}
|
||||
|
||||
@@ -320,7 +329,6 @@ func NoNamespace(name string) Expectation {
|
||||
}
|
||||
|
||||
func namespace(name string) (*corev1.Namespace, error) {
|
||||
fixture.KubeClientset.CoreV1()
|
||||
return fixture.KubeClientset.CoreV1().Namespaces().Get(context.Background(), name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user