mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-22 18:48:46 +01:00)

Compare commits (21 commits):

8a3940d8db
1bf62aea19
67c23193c4
326a1dbd6b
d0b2a6cfd7
e464f6ae43
4b0a2c0ef2
8449d9a0f3
92df21cfc0
24493145a6
273683b647
a1d18559f5
8df5e96981
c4f0cd3e84
0038fce14d
6f270cc8f4
61267982ab
445916fdb0
54f29167a6
55d0d09802
2502af402d
.github/workflows/ci-build.yaml (vendored, 11 lines changed)
@@ -194,7 +194,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
@@ -271,13 +271,13 @@ jobs:
# We need to vendor go modules for codegen yet
go mod download
go mod vendor -v
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
- name: Install toolchain for codegen
run: |
make install-codegen-tools-local
make install-go-tools-local
# generalizing repo name for forks: ${{ github.event.repository.name }}
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
# We install kustomize in the dist directory
- name: Add dist to PATH
@@ -431,9 +431,6 @@ jobs:
- changes
env:
ARGOCD_FAKE_IN_CLUSTER: 'true'
ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
ARGOCD_E2E_SSH_KNOWN_HOSTS: '../fixture/certs/ssh_known_hosts'
ARGOCD_E2E_K3S: 'true'
ARGOCD_IN_CI: 'true'
ARGOCD_E2E_APISERVER_PORT: '8088'
Dockerfile
@@ -1,4 +1,4 @@
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:27771fb7b40a58237c98e8d3e6b9ecdd9289cec69a857fccfb85ff36294dac20
ARG BASE_IMAGE=docker.io/library/ubuntu:25.10@sha256:5922638447b1e3ba114332c896a2c7288c876bb94adec923d70d58a17d2fec5e
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
Makefile (28 lines changed)
@@ -76,8 +76,10 @@ ARGOCD_E2E_REDIS_PORT?=6379
ARGOCD_E2E_DEX_PORT?=5556
ARGOCD_E2E_YARN_HOST?=localhost
ARGOCD_E2E_DISABLE_AUTH?=
ARGOCD_E2E_DIR?=/tmp/argo-e2e

ARGOCD_E2E_TEST_TIMEOUT?=90m
ARGOCD_E2E_RERUN_FAILS?=5

ARGOCD_IN_CI?=false
ARGOCD_TEST_E2E?=true
@@ -461,7 +463,7 @@ test-e2e:
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"

# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image
@@ -485,13 +487,13 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
kubectl create ns argocd-e2e-external || true
kubectl create ns argocd-e2e-external-2 || true
kubectl config set-context --current --namespace=argocd-e2e
kustomize build test/manifests/base | kubectl apply --server-side -f -
kustomize build test/manifests/base | kubectl apply --server-side --force-conflicts -f -
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
# Create GPG keys and source directories
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
if test -d $(ARGOCD_E2E_DIR)/app/config/gpg; then rm -rf $(ARGOCD_E2E_DIR)/app/config/gpg/*; fi
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/keys && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/keys
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/source && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/source
mkdir -p $(ARGOCD_E2E_DIR)/app/config/plugin && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/plugin
# create folders to hold go coverage results for each component
mkdir -p /tmp/coverage/app-controller
mkdir -p /tmp/coverage/api-server
@@ -500,13 +502,15 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
mkdir -p /tmp/coverage/notification
mkdir -p /tmp/coverage/commit-server
# set paths for locally managed ssh known hosts and tls certs data
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
ARGOCD_E2E_DIR=$(ARGOCD_E2E_DIR) \
ARGOCD_SSH_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/ssh \
ARGOCD_TLS_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/tls \
ARGOCD_GPG_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/gpg/source \
ARGOCD_GNUPGHOME=$(ARGOCD_E2E_DIR)/app/config/gpg/keys \
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
ARGOCD_PLUGINCONFIGFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
ARGOCD_PLUGINSOCKFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
ARGOCD_GIT_CONFIG=$(PWD)/test/e2e/fixture/gitconfig \
ARGOCD_E2E_DISABLE_AUTH=false \
ARGOCD_ZJWT_FEATURE_FLAG=always \
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
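(Since the two new knobs are `?=` assignments, they can be overridden per invocation; an illustrative call, not taken from the commit: `make test-e2e-local ARGOCD_E2E_RERUN_FAILS=0 ARGOCD_E2E_DIR=$HOME/argo-e2e` would disable reruns of failed tests and move the ephemeral test data out of `/tmp`.)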
Procfile (4 lines changed)
@@ -2,7 +2,7 @@ controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v3/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: hack/start-redis-with-password.sh
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=./dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
@@ -11,4 +11,4 @@ helm-registry: test/fixture/testrepos/start-helm-registry.sh
oci-registry: test/fixture/testrepos/start-authenticated-helm-registry.sh
dev-mounter: [ "$ARGOCD_E2E_TEST" != "true" ] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
@@ -1137,13 +1137,13 @@ func (ctrl *ApplicationController) processProjectQueueItem() (processNext bool)
}

func (ctrl *ApplicationController) finalizeProjectDeletion(proj *appv1.AppProject) error {
    apps, err := ctrl.appLister.Applications(ctrl.namespace).List(labels.Everything())
    apps, err := ctrl.appLister.List(labels.Everything())
    if err != nil {
        return fmt.Errorf("error listing applications: %w", err)
    }
    appsCount := 0
    for i := range apps {
        if apps[i].Spec.GetProject() == proj.Name {
        if apps[i].Spec.GetProject() == proj.Name && ctrl.isAppNamespaceAllowed(apps[i]) && proj.IsAppNamespacePermitted(apps[i], ctrl.namespace) {
            appsCount++
        }
    }
@@ -1559,8 +1559,18 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
    // if we just completed an operation, force a refresh so that UI will report up-to-date
    // sync/health information
    if _, err := cache.MetaNamespaceKeyFunc(app); err == nil {
        // force app refresh with using CompareWithLatest comparison type and trigger app reconciliation loop
        ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatestForceResolve.Pointer(), nil)
        var compareWith CompareWith
        if state.Operation.InitiatedBy.Automated {
            // Do not force revision resolution on automated operations because
            // this would cause excessive ls-remote requests on monorepo commits
            compareWith = CompareWithLatest
        } else {
            // Force app refresh using the most recent resolved revision after sync,
            // so the UI won't show a just-synced application as out of sync if it was
            // synced after commit but before app refresh (see #18153)
            compareWith = CompareWithLatestForceResolve
        }
        ctrl.requestAppRefresh(app.QualifiedName(), compareWith.Pointer(), nil)
    } else {
        logCtx.WithError(err).Warn("Fails to requeue application")
    }
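The net effect of the hunk above: manually initiated syncs still force revision resolution, so the UI immediately reflects the just-synced revision, while automated syncs reuse the already-resolved revision, avoiding the burst of ls-remote requests that a single monorepo commit would otherwise trigger across all auto-synced applications.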
@@ -2302,6 +2302,93 @@ func TestFinalizeProjectDeletion_DoesNotHaveApplications(t *testing.T) {
    }, receivedPatch)
}

func TestFinalizeProjectDeletion_HasApplicationInOtherNamespace(t *testing.T) {
    app := newFakeApp()
    app.Namespace = "team-a"
    proj := &v1alpha1.AppProject{
        ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace},
        Spec: v1alpha1.AppProjectSpec{
            SourceNamespaces: []string{"team-a"},
        },
    }
    ctrl := newFakeController(t.Context(), &fakeData{
        apps:                  []runtime.Object{app, proj},
        applicationNamespaces: []string{"team-a"},
    }, nil)

    fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
    patched := false
    fakeAppCs.PrependReactor("patch", "*", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
        patched = true
        return true, &v1alpha1.AppProject{}, nil
    })

    err := ctrl.finalizeProjectDeletion(proj)
    require.NoError(t, err)
    assert.False(t, patched)
}

func TestFinalizeProjectDeletion_IgnoresAppsInUnmonitoredNamespace(t *testing.T) {
    app := newFakeApp()
    app.Namespace = "team-b"
    proj := &v1alpha1.AppProject{
        ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace},
    }
    ctrl := newFakeController(t.Context(), &fakeData{
        apps:                  []runtime.Object{app, proj},
        applicationNamespaces: []string{"team-a"},
    }, nil)

    fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
    receivedPatch := map[string]any{}
    fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
        if patchAction, ok := action.(kubetesting.PatchAction); ok {
            require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
        }
        return true, &v1alpha1.AppProject{}, nil
    })

    err := ctrl.finalizeProjectDeletion(proj)
    require.NoError(t, err)
    assert.Equal(t, map[string]any{
        "metadata": map[string]any{
            "finalizers": nil,
        },
    }, receivedPatch)
}

func TestFinalizeProjectDeletion_IgnoresAppsNotPermittedByProject(t *testing.T) {
    app := newFakeApp()
    app.Namespace = "team-b"
    proj := &v1alpha1.AppProject{
        ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace},
        Spec: v1alpha1.AppProjectSpec{
            SourceNamespaces: []string{"team-a"},
        },
    }
    ctrl := newFakeController(t.Context(), &fakeData{
        apps:                  []runtime.Object{app, proj},
        applicationNamespaces: []string{"team-a", "team-b"},
    }, nil)

    fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
    receivedPatch := map[string]any{}
    fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
        if patchAction, ok := action.(kubetesting.PatchAction); ok {
            require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
        }
        return true, &v1alpha1.AppProject{}, nil
    })

    err := ctrl.finalizeProjectDeletion(proj)
    require.NoError(t, err)
    assert.Equal(t, map[string]any{
        "metadata": map[string]any{
            "finalizers": nil,
        },
    }, receivedPatch)
}

func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
    app := newFakeApp()
    app.Spec.Project = "default"
@@ -2546,6 +2633,41 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
    assert.Equal(t, CompareWithLatestForceResolve, level)
}

func TestProcessRequestedAppAutomatedOperation_Successful(t *testing.T) {
    app := newFakeApp()
    app.Spec.Project = "default"
    app.Operation = &v1alpha1.Operation{
        Sync: &v1alpha1.SyncOperation{},
        InitiatedBy: v1alpha1.OperationInitiator{
            Automated: true,
        },
    }
    ctrl := newFakeController(t.Context(), &fakeData{
        apps: []runtime.Object{app, &defaultProj},
        manifestResponses: []*apiclient.ManifestResponse{{
            Manifests: []string{},
        }},
    }, nil)
    fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
    receivedPatch := map[string]any{}
    fakeAppCs.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
        if patchAction, ok := action.(kubetesting.PatchAction); ok {
            require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &receivedPatch))
        }
        return true, &v1alpha1.Application{}, nil
    })

    ctrl.processRequestedAppOperation(app)

    phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
    message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
    assert.Equal(t, string(synccommon.OperationSucceeded), phase)
    assert.Equal(t, "successfully synced (no more tasks)", message)
    ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
    assert.True(t, ok)
    assert.Equal(t, CompareWithLatest, level)
}

func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
    testCases := []struct {
        name string
@@ -3,29 +3,33 @@
The test [directory](https://github.com/argoproj/argo-cd/tree/master/test) contains E2E tests and test applications. The tests assume that Argo CD services are installed into the `argocd-e2e` namespace of the cluster in the current context. A throw-away
namespace `argocd-e2e***` is created prior to the execution of the tests. The throw-away namespace is used as a target namespace for test applications.

The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution, the directory is copied into a `/tmp/argo-e2e***` temp directory and used in tests as a
The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution, the directory is copied into a `/tmp/argo-e2e***` temp directory (configurable by `ARGOCD_E2E_DIR`) and used in tests as a
Git repository via a file URL: `file:///tmp/argo-e2e***`.

> [!NOTE]
> You might get an error such as `unable to ls-remote HEAD on repository: failed to list refs: repository not found` when querying the local repository exposed through the e2e server running in a container.
> This is often caused by `/tmp` directory-sharing protection. You can configure a different directory with `ARGOCD_E2E_DIR`, or disable the directory-sharing protection.
>
> **Rancher Desktop Volume Sharing**
>
> The e2e git server runs in a container. If you are using Rancher Desktop, you will need to enable volume sharing for
> the e2e container to access the testdata directory. To do this, add the following to
> To enable `/tmp` sharing, add the following to
> `~/Library/Application\ Support/rancher-desktop/lima/_config/override.yaml` and restart Rancher Desktop:
>
> ```yaml
> mounts:
>   - location: /private/tmp
>     writable: true
>   - location: /private/tmp
>     writable: true
> ```

## Running Tests Locally

### With virtualized chain

1. Start the e2e version: `make start-e2e`
2. Run the tests: `make test-e2e`

### With local chain

1. Start the e2e version: `make start-e2e-local`
2. Run the tests: `make test-e2e-local`
@@ -37,32 +41,32 @@ You can observe the tests by using the UI [http://localhost:4000/applications](h

The Makefile's `start-e2e` target starts instances of ArgoCD on your local machine, most of which require a network listener. If, for any reason, your machine already has network services listening on the same ports, the e2e tests will not run. You can deviate from the defaults by setting the following environment variables before you run `make start-e2e`:

* `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
* `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)
* `ARGOCD_E2E_DEX_PORT`: Listener port for `dex` (default: `5556`)
* `ARGOCD_E2E_REDIS_PORT`: Listener port for `redis` (default: `6379`)
* `ARGOCD_E2E_YARN_CMD`: Command to use for starting the UI via Yarn (default: `yarn`)
- `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
- `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)
- `ARGOCD_E2E_DEX_PORT`: Listener port for `dex` (default: `5556`)
- `ARGOCD_E2E_REDIS_PORT`: Listener port for `redis` (default: `6379`)
- `ARGOCD_E2E_YARN_CMD`: Command to use for starting the UI via Yarn (default: `yarn`)
- `ARGOCD_E2E_DIR`: Local path to the repository to use for ephemeral test data

If you have changed the port for `argocd-server`, be sure to also set the `ARGOCD_SERVER` environment variable to point to that port, e.g. `export ARGOCD_SERVER=localhost:8888`, before running `make test-e2e` so that the tests communicate with the correct server component.


## Test Isolation

Some effort has been made to balance test isolation with speed. Tests are isolated as follows; each test gets:

* A random 5 character ID.
* A unique Git repository containing the `testdata` in `/tmp/argo-e2e/${id}`.
* A namespace `argocd-e2e-ns-${id}`.
* A primary name for the app `argocd-e2e-${id}`.
- A random 5 character ID.
- A unique Git repository containing the `testdata` in `/tmp/argo-e2e/${id}`.
- A namespace `argocd-e2e-ns-${id}`.
- A primary name for the app `argocd-e2e-${id}`.

## Run only a subset of tests

Running all tests locally is a time-consuming process. To run only a subset of tests, you can set the `TEST_MODULE` environment variable.
For example, to run only the OCI tests, you can set the variable as follows: `make TEST_MODULE=./test/e2e/oci_test.go test-e2e-local`
Running all tests locally is a time-consuming process. To run only a subset of tests, you can set the `TEST_MODULE` environment variable.
For example, to run only the OCI tests, you can set the variable as follows: `make TEST_MODULE=./test/e2e/oci_test.go test-e2e-local`

If you want more fine-grained control over which tests run, you can also try `make TEST_FLAGS="-run <TEST_METHOD_NAME_REGEXP>" test-e2e-local`.
For individual tests, you can run them using your IDE's run-test feature.

For individual tests, you can run them using your IDE's run-test feature.

## Troubleshooting

**Tests fail to delete `argocd-e2e-ns-*` namespaces.**
@@ -27,6 +27,12 @@ When Argo CD is upgraded manually using plain manifests or Kustomize overlays, i

Users upgrading Argo CD manually using `helm upgrade` are not impacted by this change, since Helm does not use client-side apply and does not result in creation of the `last-applied` annotation.

#### Users who previously upgraded to 3.3.0 or 3.3.1

In some cases, after upgrading to one of those versions and applying Server-Side Apply, the following error occurred:
`one or more synchronization tasks completed unsuccessfully, reason: Failed to perform client-side apply migration: failed to perform client-side apply migration on manager kubectl-client-side-apply: error when patching "/dev/shm/2047509016": CustomResourceDefinition.apiextensions.k8s.io "applicationsets.argoproj.io" is invalid: metadata.annotations: Too long: may not be more than 262144 bytes`.

Users who configured the sync option `ClientSideApplyMigration=false` as a temporary remediation for the above error should remove it after upgrading to `3.3.2`. Leaving `ClientSideApplyMigration` disabled poses a risk of conflicts between Kubernetes field managers in the future.

### Source Hydrator Now Tracks Hydration State Using Git Notes

Previously, Argo CD's Source Hydrator pushed a new hydrated commit for every DRY (source) commit, regardless of whether any manifest files (`manifest.yaml`) actually changed. This was necessary for the hydrator to track which DRY commit had last been hydrated: it embedded this information in the `hydrator.metadata` file's `drySha` field in each hydrated commit.
@@ -89,11 +95,11 @@ removed in a future release.
Argo CD v3.3 upgrades the bundled Helm version to 3.19.4. There are no breaking changes in Helm 3.19.4 according to the
[release notes](https://github.com/helm/helm/releases/tag/v3.19.0).

## Kustomize Upgraded to 5.8.0
## Kustomize Upgraded to 5.8.1

Argo CD v3.3 upgrades the bundled Kustomize version from v5.7.0 to v5.8.0. According to the
Argo CD v3.3 upgrades the bundled Kustomize version from v5.7.0 to v5.8.1. According to the
[5.7.1](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv5.7.1)
and [5.8.0](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv5.8.0) release notes, there are no breaking changes.
and [5.8.1](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv5.8.1) release notes, there are no breaking changes.

However, note that Kustomize 5.7.1 introduces code to replace the `shlex` library used for parsing arguments in exec plugins.
If any existing manifests become corrupted, please follow the instructions in the
@@ -330,9 +330,10 @@ This is useful when you have other operators managing resources that are no long

When client-side apply migration is enabled:

1. Argo CD will use the specified field manager (or default if not specified) to perform migration
2. During a server-side apply sync operation, it will:
   - Perform a client-side-apply with the specified field manager
   - Move the 'last-applied-configuration' annotation to be managed by the specified manager
   - Perform the server-side apply, which will auto migrate all the fields under the manager that owns the 'last-applied-configuration' annotation.
   - Check if the specified field manager exists in the resource's `managedFields` with `operation: Update` (indicating client-side apply)
   - Patch the `managedFields`, transferring field ownership from the client-side apply manager to Argo CD's server-side apply manager (`argocd-controller`)
   - Remove the client-side apply manager entry from `managedFields`
   - Perform the server-side apply with the migrated field ownership

This feature is based on Kubernetes' [client-side apply migration KEP](https://github.com/alexzielenski/enhancements/blob/03df8820b9feca6d2cab78e303c99b2c9c0c4c5c/keps/sig-cli/3517-kubectl-client-side-apply-migration/README.md), which provides the auto migration from client-side to server-side apply.
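For illustration, here is a minimal standalone sketch of the mechanism described above, assuming a plain client-go `dynamic.Interface`; the function name and the manager names are examples, not Argo CD API:

```go
package csamigrate

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/util/csaupgrade"
)

// migrateOwnership fetches the live object, asks csaupgrade to union the
// fields owned by the client-side apply manager into the server-side apply
// manager, and applies the resulting JSON patch. A sketch only: the real
// sync controller additionally wraps this in retry.RetryOnConflict.
func migrateOwnership(ctx context.Context, dyn dynamic.Interface, gvr schema.GroupVersionResource, ns, name string) error {
	live, err := dyn.Resource(gvr).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("get live object: %w", err)
	}
	// "kubectl-client-side-apply" is kubectl's default CSA manager;
	// "argocd-controller" is Argo CD's SSA manager per the docs above.
	patch, err := csaupgrade.UpgradeManagedFieldsPatch(live, sets.New("kubectl-client-side-apply"), "argocd-controller")
	if err != nil {
		return fmt.Errorf("build csaupgrade patch: %w", err)
	}
	if patch == nil {
		return nil // nothing owned by the CSA manager; no migration needed
	}
	_, err = dyn.Resource(gvr).Namespace(ns).Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{})
	return err
}
```

The actual implementation in the sync code below also refetches the object inside `retry.RetryOnConflict`, so a concurrent change to `managedFields` surfaces as a clean retry rather than a failed sync.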
@@ -17,10 +17,13 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/util/csaupgrade"
    "k8s.io/client-go/util/retry"
    "k8s.io/klog/v2/textlogger"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -1110,7 +1113,9 @@ func (sc *syncContext) shouldUseServerSideApply(targetObj *unstructured.Unstruct
}

// needsClientSideApplyMigration checks if a resource has fields managed by the specified manager
// that need to be migrated to the server-side apply manager
// with operation "Update" (client-side apply) that need to be migrated to server-side apply.
// Client-side apply uses operation "Update", while server-side apply uses operation "Apply".
// We only migrate managers with "Update" operation to avoid re-migrating already-migrated managers.
func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstructured, fieldManager string) bool {
    if liveObj == nil || fieldManager == "" {
        return false
@@ -1122,7 +1127,9 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
    }

    for _, field := range managedFields {
        if field.Manager == fieldManager {
        // Only consider managers with operation "Update" (client-side apply).
        // Managers with operation "Apply" are already using server-side apply.
        if field.Manager == fieldManager && field.Operation == metav1.ManagedFieldsOperationUpdate {
            return true
        }
    }
@@ -1130,29 +1137,70 @@ func (sc *syncContext) needsClientSideApplyMigration(liveObj *unstructured.Unstr
    return false
}

// performClientSideApplyMigration performs a client-side-apply using the specified field manager.
// This moves the 'last-applied-configuration' field to be managed by the specified manager.
// The next time server-side apply is performed, kubernetes automatically migrates all fields from the manager
// that owns 'last-applied-configuration' to the manager that uses server-side apply. This will remove the
// specified manager from the resources managed fields. 'kubectl-client-side-apply' is used as the default manager.
func (sc *syncContext) performClientSideApplyMigration(targetObj *unstructured.Unstructured, fieldManager string) error {
    sc.log.WithValues("resource", kubeutil.GetResourceKey(targetObj)).V(1).Info("Performing client-side apply migration step")
// performCSAUpgradeMigration uses the csaupgrade package to migrate managed fields
// from a client-side apply manager (operation: Update) to the server-side apply manager.
// This directly patches the managedFields to transfer field ownership, avoiding the need
// to write the last-applied-configuration annotation (which has a 262KB size limit).
// This is the primary method for CSA to SSA migration in ArgoCD.
func (sc *syncContext) performCSAUpgradeMigration(liveObj *unstructured.Unstructured, csaFieldManager string) error {
    sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
        "Performing csaupgrade-based migration")

    // Apply with the specified manager to set up the migration
    _, err := sc.resourceOps.ApplyResource(
        context.TODO(),
        targetObj,
        cmdutil.DryRunNone,
        false,
        false,
        false,
        fieldManager,
    )
    // Get the dynamic resource interface for the live object
    gvk := liveObj.GroupVersionKind()
    apiResource, err := kubeutil.ServerResourceForGroupVersionKind(sc.disco, gvk, "patch")
    if err != nil {
        return fmt.Errorf("failed to perform client-side apply migration on manager %s: %w", fieldManager, err)
        return fmt.Errorf("failed to get api resource for %s: %w", gvk, err)
    }
    res := kubeutil.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
    resIf := kubeutil.ToResourceInterface(sc.dynamicIf, apiResource, res, liveObj.GetNamespace())

    return nil
    // Use retry to handle conflicts if managed fields changed between reconciliation and now
    //nolint:wrapcheck // error is wrapped inside the retry function
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        // Fetch fresh object to get current managed fields state
        freshObj, getErr := resIf.Get(context.TODO(), liveObj.GetName(), metav1.GetOptions{})
        if getErr != nil {
            return fmt.Errorf("failed to get fresh object for CSA migration: %w", getErr)
        }

        // Check if migration is still needed with fresh state
        if !sc.needsClientSideApplyMigration(freshObj, csaFieldManager) {
            sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
                "CSA migration no longer needed")
            return nil
        }

        // Generate the migration patch using the csaupgrade package
        // This unions the CSA manager's fields into the SSA manager and removes the CSA manager entry
        patchData, patchErr := csaupgrade.UpgradeManagedFieldsPatch(
            freshObj,
            sets.New(csaFieldManager),
            sc.serverSideApplyManager,
        )
        if patchErr != nil {
            return fmt.Errorf("failed to generate csaupgrade migration patch: %w", patchErr)
        }
        if patchData == nil {
            // No migration needed
            return nil
        }

        // Apply the migration patch to transfer field ownership.
        _, patchErr = resIf.Patch(context.TODO(), liveObj.GetName(), types.JSONPatchType, patchData, metav1.PatchOptions{})
        if patchErr != nil {
            if apierrors.IsConflict(patchErr) {
                sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
                    "Retrying CSA migration due to conflict")
            }
            // Return the error unmodified so RetryOnConflict can identify conflicts correctly.
            return patchErr
        }

        sc.log.WithValues("resource", kubeutil.GetResourceKey(liveObj)).V(1).Info(
            "Successfully migrated managed fields using csaupgrade")
        return nil
    })
}

func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.ResultCode, string) {
@@ -1173,11 +1221,14 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.R
    serverSideApply := sc.shouldUseServerSideApply(t.targetObj, dryRun)

    // Check if we need to perform client-side apply migration for server-side apply
    // Perform client-side apply migration for server-side apply
    // This uses csaupgrade to directly patch managedFields, transferring ownership
    // from CSA managers (operation: Update) to the SSA manager (argocd-controller)
    if serverSideApply && !dryRun && sc.enableClientSideApplyMigration {
        if sc.needsClientSideApplyMigration(t.liveObj, sc.clientSideApplyMigrationManager) {
            err = sc.performClientSideApplyMigration(t.targetObj, sc.clientSideApplyMigrationManager)
            err = sc.performCSAUpgradeMigration(t.liveObj, sc.clientSideApplyMigrationManager)
            if err != nil {
                return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration: %v", err)
                return common.ResultCodeSyncFailed, fmt.Sprintf("Failed to perform client-side apply migration for %s: %v", kubeutil.GetResourceKey(t.liveObj), err)
            }
        }
    }
@@ -2417,6 +2417,21 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
        }(),
        expected: true,
    },
    {
        name: "CSA manager with Apply operation should not need migration",
        liveObj: func() *unstructured.Unstructured {
            obj := testingutils.NewPod()
            obj.SetManagedFields([]metav1.ManagedFieldsEntry{
                {
                    Manager:   "kubectl-client-side-apply",
                    Operation: metav1.ManagedFieldsOperationApply,
                    FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{}}}`)},
                },
            })
            return obj
        }(),
        expected: false,
    },
}

for _, tt := range tests {
@@ -2427,6 +2442,129 @@ func TestNeedsClientSideApplyMigration(t *testing.T) {
    }
}

func TestPerformCSAUpgradeMigration_NoMigrationNeeded(t *testing.T) {
    // Create a fake dynamic client with a Pod scheme
    scheme := runtime.NewScheme()
    _ = corev1.AddToScheme(scheme)

    // Object with only SSA manager (operation: Apply), no CSA manager (operation: Update)
    obj := testingutils.NewPod()
    obj.SetNamespace(testingutils.FakeArgoCDNamespace)
    obj.SetManagedFields([]metav1.ManagedFieldsEntry{
        {
            Manager:   "argocd-controller",
            Operation: metav1.ManagedFieldsOperationApply,
            FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:containers":{}}}`)},
        },
    })

    // Create fake dynamic client with the object
    dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

    syncCtx := newTestSyncCtx(nil)
    syncCtx.serverSideApplyManager = "argocd-controller"
    syncCtx.dynamicIf = dynamicClient
    syncCtx.disco = &fakedisco.FakeDiscovery{
        Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
    }

    // Should return nil (no error) because there's no CSA manager to migrate
    err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
    assert.NoError(t, err)
}

func TestPerformCSAUpgradeMigration_WithCSAManager(t *testing.T) {
    // Create a fake dynamic client with a Pod scheme
    scheme := runtime.NewScheme()
    _ = corev1.AddToScheme(scheme)

    // Create the live object with a CSA manager (operation: Update)
    obj := testingutils.NewPod()
    obj.SetNamespace(testingutils.FakeArgoCDNamespace)
    obj.SetManagedFields([]metav1.ManagedFieldsEntry{
        {
            Manager:   "kubectl-client-side-apply",
            Operation: metav1.ManagedFieldsOperationUpdate,
            FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
        },
    })

    // Create fake dynamic client with the object
    dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

    syncCtx := newTestSyncCtx(nil)
    syncCtx.serverSideApplyManager = "argocd-controller"
    syncCtx.dynamicIf = dynamicClient
    syncCtx.disco = &fakedisco.FakeDiscovery{
        Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
    }

    // Perform the migration
    err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
    assert.NoError(t, err)

    // Get the updated object from the fake client
    gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
    updatedObj, err := dynamicClient.Resource(gvr).Namespace(obj.GetNamespace()).Get(context.TODO(), obj.GetName(), metav1.GetOptions{})
    require.NoError(t, err)

    // Verify the CSA manager (operation: Update) no longer exists
    managedFields := updatedObj.GetManagedFields()
    for _, mf := range managedFields {
        if mf.Manager == "kubectl-client-side-apply" && mf.Operation == metav1.ManagedFieldsOperationUpdate {
            t.Errorf("CSA manager 'kubectl-client-side-apply' with operation Update should have been removed, but still exists")
        }
    }
}

func TestPerformCSAUpgradeMigration_ConflictRetry(t *testing.T) {
    // This test verifies that when a 409 Conflict occurs on the patch because
    // another actor modified the object between Get and Patch, changing the resourceVersion,
    // the retry.RetryOnConflict loop retries and eventually succeeds.
    scheme := runtime.NewScheme()
    _ = corev1.AddToScheme(scheme)

    obj := testingutils.NewPod()
    obj.SetNamespace(testingutils.FakeArgoCDNamespace)
    obj.SetManagedFields([]metav1.ManagedFieldsEntry{
        {
            Manager:   "kubectl-client-side-apply",
            Operation: metav1.ManagedFieldsOperationUpdate,
            FieldsV1:  &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
        },
    })

    dynamicClient := fake.NewSimpleDynamicClient(scheme, obj)

    // Simulate a conflict on the first patch attempt, where another
    // controller modified the object between our Get and Patch, bumping the resourceVersion.
    // The second attempt should succeed.
    patchAttempt := 0
    dynamicClient.PrependReactor("patch", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
        patchAttempt++
        if patchAttempt == 1 {
            // First attempt: simulate 409 Conflict (resourceVersion mismatch)
            return true, nil, apierrors.NewConflict(
                schema.GroupResource{Group: "", Resource: "pods"},
                obj.GetName(),
                errors.New("the object has been modified; please apply your changes to the latest version"),
            )
        }
        return false, nil, nil
    })

    syncCtx := newTestSyncCtx(nil)
    syncCtx.serverSideApplyManager = "argocd-controller"
    syncCtx.dynamicIf = dynamicClient
    syncCtx.disco = &fakedisco.FakeDiscovery{
        Fake: &testcore.Fake{Resources: testingutils.StaticAPIResources},
    }

    err := syncCtx.performCSAUpgradeMigration(obj, "kubectl-client-side-apply")
    assert.NoError(t, err, "Migration should succeed after retrying on conflict")
    assert.Equal(t, 2, patchAttempt, "Expected exactly 2 patch attempts (1 conflict + 1 success)")
}

func diffResultListClusterResource() *diff.DiffResultList {
    ns1 := testingutils.NewNamespace()
    ns1.SetName("ns-1")
@@ -0,0 +1 @@
ee7cf0c1e3592aa7bb66ba82b359933a95e7f2e0b36e5f53ed0a4535b017f2f8 kustomize_5.8.1_darwin_amd64.tar.gz
@@ -0,0 +1 @@
8886f8a78474e608cc81234f729fda188a9767da23e28925802f00ece2bab288 kustomize_5.8.1_darwin_arm64.tar.gz
@@ -0,0 +1 @@
029a7f0f4e1932c52a0476cf02a0fd855c0bb85694b82c338fc648dcb53a819d kustomize_5.8.1_linux_amd64.tar.gz
@@ -0,0 +1 @@
0953ea3e476f66d6ddfcd911d750f5167b9365aa9491b2326398e289fef2c142 kustomize_5.8.1_linux_arm64.tar.gz
@@ -0,0 +1 @@
87ffa6d248d6baceb35463042da354a317bfd3ee49afc7f9850c05c36319c708 kustomize_5.8.1_linux_ppc64le.tar.gz
@@ -0,0 +1 @@
12df0fcec017a82b41d87b85c53263ae9657740b130eba42381bae3495521c9b kustomize_5.8.1_linux_s390x.tar.gz
@@ -9,6 +9,10 @@ which gotestsum || go install gotest.tools/gotestsum@latest

TEST_RESULTS=${TEST_RESULTS:-test-results}
TEST_FLAGS=${TEST_FLAGS:-}
DIST_DIR=${DIST_DIR:-dist}

# Add DIST_DIR to PATH so binaries installed for argo are found first
export PATH="${DIST_DIR}:${PATH}"

if test "${ARGOCD_TEST_PARALLELISM:-}" != ""; then
    TEST_FLAGS="$TEST_FLAGS -p $ARGOCD_TEST_PARALLELISM"
@@ -12,6 +12,6 @@
# add-kustomize-checksums.sh to help download checksums.
###############################################################################
helm3_version=3.19.4
kustomize5_version=5.8.0
kustomize5_version=5.8.1
protoc_version=29.3
oras_version=1.2.0
@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.0
newTag: v3.3.2

@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.0
newTag: v3.3.2
resources:
- ./application-controller
- ./dex
manifests/core-install-with-hydrator.yaml (generated, 12 lines changed)
@@ -31273,7 +31273,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31408,7 +31408,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31536,7 +31536,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -31833,7 +31833,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31886,7 +31886,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32234,7 +32234,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-application-controller
ports:
manifests/core-install.yaml (generated, 10 lines changed)
@@ -31241,7 +31241,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -31370,7 +31370,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -31667,7 +31667,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -31720,7 +31720,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -32068,7 +32068,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-application-controller
ports:
@@ -12,4 +12,4 @@ resources:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.0
newTag: v3.3.2

@@ -12,7 +12,7 @@ patches:
images:
- name: quay.io/argoproj/argocd
newName: quay.io/argoproj/argocd
newTag: v3.3.0
newTag: v3.3.2
resources:
- ../../base/application-controller
- ../../base/applicationset-controller
manifests/ha/install-with-hydrator.yaml (generated, 18 lines changed)
@@ -32639,7 +32639,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -32774,7 +32774,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -32925,7 +32925,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -33021,7 +33021,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -33145,7 +33145,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -33468,7 +33468,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33521,7 +33521,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -33895,7 +33895,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -34279,7 +34279,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-application-controller
ports:
manifests/ha/install.yaml (generated, 16 lines changed)
@@ -32609,7 +32609,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -32761,7 +32761,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -32857,7 +32857,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -32981,7 +32981,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -33304,7 +33304,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -33357,7 +33357,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -33731,7 +33731,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -34115,7 +34115,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-application-controller
ports:
manifests/ha/namespace-install-with-hydrator.yaml (generated, 18 lines changed)
@@ -1897,7 +1897,7 @@ spec:
key: applicationsetcontroller.status.max.resources.count
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-applicationset-controller
ports:
@@ -2032,7 +2032,7 @@ spec:
key: log.format.timestamp
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2183,7 +2183,7 @@ spec:
- -n
- /usr/local/bin/argocd
- /shared/argocd-dex
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: copyutil
securityContext:
@@ -2279,7 +2279,7 @@ spec:
key: notificationscontroller.repo.server.plaintext
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
tcpSocket:
@@ -2403,7 +2403,7 @@ spec:
- argocd
- admin
- redis-initial-password
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: IfNotPresent
name: secret-init
securityContext:
@@ -2726,7 +2726,7 @@ spec:
value: /helm-working-dir
- name: HELM_DATA_HOME
value: /helm-working-dir
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -2779,7 +2779,7 @@ spec:
command:
- sh
- -c
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
name: copyutil
securityContext:
allowPrivilegeEscalation: false
@@ -3153,7 +3153,7 @@ spec:
key: server.sync.replace.allowed
name: argocd-cmd-params-cm
optional: true
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -3537,7 +3537,7 @@ spec:
optional: true
- name: KUBECACHEDIR
value: /tmp/kubecache
image: quay.io/argoproj/argocd:v3.3.0
image: quay.io/argoproj/argocd:v3.3.2
imagePullPolicy: Always
name: argocd-application-controller
ports:

manifests/ha/namespace-install.yaml (generated, 16 changed lines)

@@ -1867,7 +1867,7 @@ spec:
 key: applicationsetcontroller.status.max.resources.count
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-applicationset-controller
 ports:
@@ -2019,7 +2019,7 @@ spec:
 - -n
 - /usr/local/bin/argocd
 - /shared/argocd-dex
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: copyutil
 securityContext:
@@ -2115,7 +2115,7 @@ spec:
 key: notificationscontroller.repo.server.plaintext
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 tcpSocket:
@@ -2239,7 +2239,7 @@ spec:
 - argocd
 - admin
 - redis-initial-password
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: IfNotPresent
 name: secret-init
 securityContext:
@@ -2562,7 +2562,7 @@ spec:
 value: /helm-working-dir
 - name: HELM_DATA_HOME
 value: /helm-working-dir
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -2615,7 +2615,7 @@ spec:
 command:
 - sh
 - -c
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 name: copyutil
 securityContext:
 allowPrivilegeEscalation: false
@@ -2989,7 +2989,7 @@ spec:
 key: server.sync.replace.allowed
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 httpGet:
@@ -3373,7 +3373,7 @@ spec:
 optional: true
 - name: KUBECACHEDIR
 value: /tmp/kubecache
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-application-controller
 ports:

manifests/install-with-hydrator.yaml (generated, 18 changed lines)

@@ -31717,7 +31717,7 @@ spec:
 key: applicationsetcontroller.status.max.resources.count
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-applicationset-controller
 ports:
@@ -31852,7 +31852,7 @@ spec:
 key: log.format.timestamp
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -32003,7 +32003,7 @@ spec:
 - -n
 - /usr/local/bin/argocd
 - /shared/argocd-dex
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: copyutil
 securityContext:
@@ -32099,7 +32099,7 @@ spec:
 key: notificationscontroller.repo.server.plaintext
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 tcpSocket:
@@ -32201,7 +32201,7 @@ spec:
 - argocd
 - admin
 - redis-initial-password
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: IfNotPresent
 name: secret-init
 securityContext:
@@ -32498,7 +32498,7 @@ spec:
 value: /helm-working-dir
 - name: HELM_DATA_HOME
 value: /helm-working-dir
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -32551,7 +32551,7 @@ spec:
 command:
 - sh
 - -c
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 name: copyutil
 securityContext:
 allowPrivilegeEscalation: false
@@ -32923,7 +32923,7 @@ spec:
 key: server.sync.replace.allowed
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 httpGet:
@@ -33307,7 +33307,7 @@ spec:
 optional: true
 - name: KUBECACHEDIR
 value: /tmp/kubecache
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-application-controller
 ports:

manifests/install.yaml (generated, 16 changed lines)

@@ -31685,7 +31685,7 @@ spec:
 key: applicationsetcontroller.status.max.resources.count
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-applicationset-controller
 ports:
@@ -31837,7 +31837,7 @@ spec:
 - -n
 - /usr/local/bin/argocd
 - /shared/argocd-dex
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: copyutil
 securityContext:
@@ -31933,7 +31933,7 @@ spec:
 key: notificationscontroller.repo.server.plaintext
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 tcpSocket:
@@ -32035,7 +32035,7 @@ spec:
 - argocd
 - admin
 - redis-initial-password
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: IfNotPresent
 name: secret-init
 securityContext:
@@ -32332,7 +32332,7 @@ spec:
 value: /helm-working-dir
 - name: HELM_DATA_HOME
 value: /helm-working-dir
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -32385,7 +32385,7 @@ spec:
 command:
 - sh
 - -c
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 name: copyutil
 securityContext:
 allowPrivilegeEscalation: false
@@ -32757,7 +32757,7 @@ spec:
 key: server.sync.replace.allowed
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 httpGet:
@@ -33141,7 +33141,7 @@ spec:
 optional: true
 - name: KUBECACHEDIR
 value: /tmp/kubecache
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-application-controller
 ports:

manifests/namespace-install-with-hydrator.yaml (generated, 18 changed lines)

@@ -975,7 +975,7 @@ spec:
 key: applicationsetcontroller.status.max.resources.count
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-applicationset-controller
 ports:
@@ -1110,7 +1110,7 @@ spec:
 key: log.format.timestamp
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -1261,7 +1261,7 @@ spec:
 - -n
 - /usr/local/bin/argocd
 - /shared/argocd-dex
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: copyutil
 securityContext:
@@ -1357,7 +1357,7 @@ spec:
 key: notificationscontroller.repo.server.plaintext
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 tcpSocket:
@@ -1459,7 +1459,7 @@ spec:
 - argocd
 - admin
 - redis-initial-password
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: IfNotPresent
 name: secret-init
 securityContext:
@@ -1756,7 +1756,7 @@ spec:
 value: /helm-working-dir
 - name: HELM_DATA_HOME
 value: /helm-working-dir
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -1809,7 +1809,7 @@ spec:
 command:
 - sh
 - -c
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 name: copyutil
 securityContext:
 allowPrivilegeEscalation: false
@@ -2181,7 +2181,7 @@ spec:
 key: server.sync.replace.allowed
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 httpGet:
@@ -2565,7 +2565,7 @@ spec:
 optional: true
 - name: KUBECACHEDIR
 value: /tmp/kubecache
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-application-controller
 ports:

manifests/namespace-install.yaml (generated, 16 changed lines)

@@ -943,7 +943,7 @@ spec:
 key: applicationsetcontroller.status.max.resources.count
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-applicationset-controller
 ports:
@@ -1095,7 +1095,7 @@ spec:
 - -n
 - /usr/local/bin/argocd
 - /shared/argocd-dex
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: copyutil
 securityContext:
@@ -1191,7 +1191,7 @@ spec:
 key: notificationscontroller.repo.server.plaintext
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 tcpSocket:
@@ -1293,7 +1293,7 @@ spec:
 - argocd
 - admin
 - redis-initial-password
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: IfNotPresent
 name: secret-init
 securityContext:
@@ -1590,7 +1590,7 @@ spec:
 value: /helm-working-dir
 - name: HELM_DATA_HOME
 value: /helm-working-dir
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 failureThreshold: 3
@@ -1643,7 +1643,7 @@ spec:
 command:
 - sh
 - -c
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 name: copyutil
 securityContext:
 allowPrivilegeEscalation: false
@@ -2015,7 +2015,7 @@ spec:
 key: server.sync.replace.allowed
 name: argocd-cmd-params-cm
 optional: true
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 livenessProbe:
 httpGet:
@@ -2399,7 +2399,7 @@ spec:
 optional: true
 - name: KUBECACHEDIR
 value: /tmp/kubecache
-image: quay.io/argoproj/argocd:v3.3.0
+image: quay.io/argoproj/argocd:v3.3.2
 imagePullPolicy: Always
 name: argocd-application-controller
 ports:
@@ -8,7 +8,8 @@ sonar.projectVersion=1.0
 # Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
 sonar.sources=.
 
-sonar.exclusions=docs/**
+# Exclude docs and testdata with kustomization files (Sonar IaC parser fails on empty/edge-case YAML)
+sonar.exclusions=docs/**,**/testdata/**
 
 # Encoding of the source code. Default is default system encoding
 sonar.sourceEncoding=UTF-8
@@ -24,5 +25,5 @@ sonar.go.exclusions=**/vendor/**,**/*.pb.go,**/*_test.go,**/*.pb.gw.go,**/mocks/
 # Exclude following set of patterns from duplication detection
 sonar.cpd.exclusions=**/*.pb.go,**/*.g.cs,**/*.gw.go,**/mocks/*,docs/**
 
-# Exclude test manifests from analysis
-sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**
+# Exclude test manifests from analysis (avoids Sonar IaC parser errors on empty/edge-case kustomization files)
+sonar.kubernetes.exclusions=controller/testdata/**,test/**,util/kustomize/testdata/**,util/app/discovery/testdata/**,reposerver/repository/testdata/**
@@ -11,5 +11,5 @@ fcgiwrap: sudo sh -c "test $ARGOCD_E2E_TEST = true && (fcgiwrap -s unix:/var/run
 nginx: sudo sh -c "test $ARGOCD_E2E_TEST = true && nginx -g 'daemon off;' -c $(pwd)/test/fixture/testrepos/nginx.conf"
 helm-registry: sudo sh -c "OTEL_TRACES_EXPORTER=none REGISTRY_LOG_LEVEL=info registry serve /etc/docker/registry/config.yml"
 dev-mounter: test "$ARGOCD_E2E_TEST" != "true" && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
-applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
+applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS=${ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS:-true} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
 notification: sh -c "FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications go run ./cmd/main.go --loglevel debug"
@@ -19,7 +19,10 @@ func TestBackupExportImport(t *testing.T) {
 var exportRawOutput string
 ctx := Given(t)
 // Create application in argocd namespace
-appctx := appfixture.GivenWithSameState(t)
+appctx := appfixture.GivenWithSameState(ctx)
 
+var appTestNamespace Application
+var appOtherNamespace Application
+
 // Create application in test namespace
 appctx.
@@ -29,8 +32,9 @@ func TestBackupExportImport(t *testing.T) {
 CreateApp().
 Then().
 And(func(app *Application) {
-assert.Equal(t, "exported-app1", app.Name)
-assert.Equal(t, fixture.TestNamespace(), app.Namespace)
+assert.Equal(t, appctx.AppName(), app.Name)
+assert.Equal(t, appctx.AppNamespace(), app.Namespace)
+appTestNamespace = *app
 })
 
 // Create app in other namespace
@@ -42,8 +46,9 @@ func TestBackupExportImport(t *testing.T) {
 CreateApp().
 Then().
 And(func(app *Application) {
-assert.Equal(t, "exported-app-other-namespace", app.Name)
-assert.Equal(t, fixture.AppNamespace(), app.Namespace)
+assert.Equal(t, appctx.AppName(), app.Name)
+assert.Equal(t, appctx.AppNamespace(), app.Namespace)
+appOtherNamespace = *app
 })
 
 ctx.
@@ -57,8 +62,8 @@ func TestBackupExportImport(t *testing.T) {
 AndExportedResources(func(exportResources *ExportedResources, err error) {
 require.NoError(t, err, "export format not valid")
 assert.True(t, exportResources.HasResource(kube.NewResourceKey("", "ConfigMap", "", "argocd-cm")), "argocd-cm not found in export")
-assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, "", "exported-app1")), "test namespace application not in export")
-assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, fixture.AppNamespace(), "exported-app-other-namespace")), "app namespace application not in export")
+assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, "", appTestNamespace.GetName())), "test namespace application not in export")
+assert.True(t, exportResources.HasResource(kube.NewResourceKey(ApplicationSchemaGroupVersionKind.Group, ApplicationSchemaGroupVersionKind.Kind, appOtherNamespace.GetNamespace(), appOtherNamespace.GetName())), "app namespace application not in export")
 })
 
 // Test import - clean state
@@ -70,9 +75,9 @@ func TestBackupExportImport(t *testing.T) {
 Then().
 AndCLIOutput(func(_ string, err error) {
 require.NoError(t, err, "import finished with error")
-_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.TestNamespace()).Get(t.Context(), "exported-app1", metav1.GetOptions{})
+_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(appTestNamespace.GetNamespace()).Get(t.Context(), appTestNamespace.GetName(), metav1.GetOptions{})
 require.NoError(t, err, "failed getting test namespace application after import")
-_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.AppNamespace()).Get(t.Context(), "exported-app-other-namespace", metav1.GetOptions{})
+_, err = fixture.AppClientset.ArgoprojV1alpha1().Applications(appOtherNamespace.GetNamespace()).Get(t.Context(), appOtherNamespace.GetName(), metav1.GetOptions{})
 require.NoError(t, err, "failed getting app namespace application after import")
 })
 }
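
The refactor running through these test diffs replaces hard-coded resource names ("exported-app1", fixture.TestNamespace()) with names derived from a single test context, so that two fixtures created for the same test agree on their generated identifiers. A minimal sketch of the pattern, using simplified stand-ins for the e2e fixture helpers (testState, Given, and GivenWithSameState here are illustrative assumptions, not the actual argo-cd types):

package fixturesketch

import (
	"fmt"
	"math/rand"
	"testing"
)

// testState is the per-test identity that every derived context shares.
type testState struct {
	t  *testing.T
	id string // short random ID baked into generated names
}

// Given creates fresh state: each test gets its own ID, so generated
// names such as the app name cannot collide across tests.
func Given(t *testing.T) *testState {
	return &testState{t: t, id: fmt.Sprintf("%04x", rand.Intn(1<<16))}
}

// GivenWithSameState derives a context from an existing one instead of
// from *testing.T, so both fixtures generate matching names for the test.
func GivenWithSameState(ctx *testState) *testState {
	return &testState{t: ctx.t, id: ctx.id}
}

func (s *testState) AppName() string             { return "app-" + s.id }
func (s *testState) DeploymentNamespace() string { return "deploy-ns-" + s.id }

With appctx derived from ctx rather than from *testing.T, assertions can compare against appctx.AppName() instead of a literal, which is what the TestBackupExportImport hunks above do.
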
@@ -15,8 +15,8 @@ import (
 )
 
 func TestNSAutoSyncSelfHealDisabled(t *testing.T) {
-Given(t).
-SetTrackingMethod("annotation").
+ctx := Given(t)
+ctx.SetTrackingMethod("annotation").
 Path(guestbookPath).
 SetAppNamespace(fixture.AppNamespace()).
 // TODO: There is a bug with annotation tracking method that prevents
@@ -37,7 +37,7 @@ func TestNSAutoSyncSelfHealDisabled(t *testing.T) {
 // app should not be auto-synced if k8s change detected
 When().
 And(func() {
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
 "guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
 }).
 Then().
@@ -45,8 +45,8 @@ func TestNSAutoSyncSelfHealDisabled(t *testing.T) {
 }
 
 func TestNSAutoSyncSelfHealEnabled(t *testing.T) {
-Given(t).
-SetTrackingMethod("annotation").
+ctx := Given(t)
+ctx.SetTrackingMethod("annotation").
 Path(guestbookPath).
 SetAppNamespace(fixture.AppNamespace()).
 When().
@@ -63,7 +63,7 @@ func TestNSAutoSyncSelfHealEnabled(t *testing.T) {
 When().
 // app should be auto-synced once k8s change detected
 And(func() {
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
 "guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
 }).
 Refresh(RefreshTypeNormal).
@@ -18,8 +18,8 @@ import (
 )
 
 func TestAutoSyncSelfHealDisabled(t *testing.T) {
-Given(t).
-Path(guestbookPath).
+ctx := Given(t)
+ctx.Path(guestbookPath).
 When().
 // app should be auto-synced once created
 CreateFromFile(func(app *Application) {
@@ -36,7 +36,7 @@ func TestAutoSyncSelfHealDisabled(t *testing.T) {
 // app should not be auto-synced if k8s change detected
 When().
 And(func() {
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
 "guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
 }).
 Refresh(RefreshTypeNormal).
@@ -45,8 +45,8 @@ func TestAutoSyncSelfHealDisabled(t *testing.T) {
 }
 
 func TestAutoSyncSelfHealEnabled(t *testing.T) {
-Given(t).
-Path(guestbookPath).
+ctx := Given(t)
+ctx.Path(guestbookPath).
 When().
 // app should be auto-synced once created
 CreateFromFile(func(app *Application) {
@@ -61,7 +61,7 @@ func TestAutoSyncSelfHealEnabled(t *testing.T) {
 When().
 // app should be auto-synced once k8s change detected
 And(func() {
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
 "guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 0}}`), metav1.PatchOptions{}))
 }).
 Refresh(RefreshTypeNormal).
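
The recurring one-line change in these hunks, fixture.DeploymentNamespace() becoming ctx.DeploymentNamespace(), swaps a package-level accessor (effectively shared global state) for a value owned by the test's own context. A hedged before/after sketch, reusing the illustrative testState from the sketch above (the global variable here is assumed for contrast, not the real fixture internals):

// Illustrative only; sketches why a per-context accessor is safer.
var globalDeploymentNamespace = "argocd-e2e-ns" // assumed stand-in for shared fixture state

func deploymentNamespaceBefore() string {
	// Before: every test reads the same package-level value.
	return globalDeploymentNamespace
}

func deploymentNamespaceAfter(ctx *testState) string {
	// After: each test reads the namespace its own context generated,
	// so concurrently running tests cannot observe each other's namespace.
	return ctx.DeploymentNamespace()
}
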
@@ -48,8 +48,8 @@ func TestNamespacedGetLogsAllow(_ *testing.T) {
 func TestNamespacedGetLogsDeny(t *testing.T) {
 fixture.SkipOnEnv(t, "OPENSHIFT")
 
-accountFixture.Given(t).
-Name("test").
+accountCtx := accountFixture.Given(t)
+accountCtx.Name("test").
 When().
 Create().
 Login().
@@ -76,7 +76,7 @@ func TestNamespacedGetLogsDeny(t *testing.T) {
 },
 }, "app-creator")
 
-ctx := GivenWithSameState(t)
+ctx := GivenWithSameState(accountCtx)
 ctx.SetAppNamespace(fixture.ArgoCDAppNamespace)
 ctx.
 Path("guestbook-logs").
@@ -95,8 +95,8 @@
 func TestNamespacedGetLogsAllowNS(t *testing.T) {
 fixture.SkipOnEnv(t, "OPENSHIFT")
 
-accountFixture.Given(t).
-Name("test").
+accountCtx := accountFixture.Given(t)
+accountCtx.Name("test").
 When().
 Create().
 Login().
@@ -128,7 +128,7 @@ func TestNamespacedGetLogsAllowNS(t *testing.T) {
 },
 }, "app-creator")
 
-ctx := GivenWithSameState(t)
+ctx := GivenWithSameState(accountCtx)
 ctx.SetAppNamespace(fixture.AppNamespace())
 ctx.
 Path("guestbook-logs").
@@ -220,11 +220,11 @@ func TestNamespacedAppCreation(t *testing.T) {
 Then().
 Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
 And(func(app *Application) {
-assert.Equal(t, fixture.Name(), app.Name)
+assert.Equal(t, ctx.GetName(), app.Name)
 assert.Equal(t, fixture.AppNamespace(), app.Namespace)
 assert.Equal(t, fixture.RepoURL(fixture.RepoURLTypeFile), app.Spec.GetSource().RepoURL)
 assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
-assert.Equal(t, fixture.DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
 }).
 Expect(NamespacedEvent(fixture.AppNamespace(), EventReasonResourceCreated, "create")).
@@ -272,7 +272,7 @@ func TestNamespacedAppCreationWithoutForceUpdate(t *testing.T) {
 assert.Equal(t, fixture.AppNamespace(), app.Namespace)
 assert.Equal(t, fixture.RepoURL(fixture.RepoURLTypeFile), app.Spec.GetSource().RepoURL)
 assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
-assert.Equal(t, fixture.DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, "in-cluster", app.Spec.Destination.Name)
 }).
 Expect(NamespacedEvent(fixture.AppNamespace(), EventReasonResourceCreated, "create")).
@@ -314,7 +314,8 @@ func TestNamespacedDeleteAppResource(t *testing.T) {
 // demonstrate that we cannot use a standard sync when an immutable field is changed, we must use "force"
 func TestNamespacedImmutableChange(t *testing.T) {
 fixture.SkipOnEnv(t, "OPENSHIFT")
-Given(t).
+ctx := Given(t)
+ctx.
 Path("secrets").
 SetTrackingMethod("annotation").
 SetAppNamespace(fixture.AppNamespace()).
@@ -338,7 +339,7 @@ func TestNamespacedImmutableChange(t *testing.T) {
 Expect(ResourceResultMatches(ResourceResult{
 Kind: "Secret",
 Version: "v1",
-Namespace: fixture.DeploymentNamespace(),
+Namespace: ctx.DeploymentNamespace(),
 Name: "test-secret",
 SyncPhase: "Sync",
 Status: "SyncFailed",
@@ -394,16 +395,17 @@ func TestNamespacedAppDeletion(t *testing.T) {
 
 func TestNamespacedAppLabels(t *testing.T) {
 ctx := Given(t)
+label := "id=" + ctx.ShortID()
 ctx.
 Path("config-map").
 SetTrackingMethod("annotation").
 SetAppNamespace(fixture.AppNamespace()).
 When().
-CreateApp("-l", "foo=bar").
+CreateApp("-l", label).
 Then().
 And(func(_ *Application) {
 assert.Contains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list")), ctx.AppQualifiedName())
-assert.Contains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list", "-l", "foo=bar")), ctx.AppQualifiedName())
+assert.Contains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list", "-l", label)), ctx.AppQualifiedName())
 assert.NotContains(t, errors.NewHandler(t).FailOnErr(fixture.RunCli("app", "list", "-l", "foo=rubbish")), ctx.AppQualifiedName())
 }).
 Given().
@@ -418,11 +420,12 @@ func TestNamespacedAppLabels(t *testing.T) {
 // check we can update the app and it is then sync'd
 Given().
 When().
-Sync("-l", "foo=bar")
+Sync("-l", label)
 }
 
 func TestNamespacedTrackAppStateAndSyncApp(t *testing.T) {
-Given(t).
+ctx := Given(t)
+ctx.
 Path(guestbookPath).
 SetTrackingMethod("annotation").
 SetAppNamespace(fixture.AppNamespace()).
@@ -433,8 +436,8 @@ func TestNamespacedTrackAppStateAndSyncApp(t *testing.T) {
 Expect(OperationPhaseIs(OperationSucceeded)).
 Expect(SyncStatusIs(SyncStatusCodeSynced)).
 Expect(HealthIs(health.HealthStatusHealthy)).
-Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced ", fixture.DeploymentNamespace()))).
-Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced", fixture.DeploymentNamespace()))).
+Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced ", ctx.DeploymentNamespace()))).
+Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced", ctx.DeploymentNamespace()))).
 Expect(NamespacedEvent(fixture.AppNamespace(), EventReasonResourceUpdated, "sync")).
 And(func(app *Application) {
 assert.NotNil(t, app.Status.OperationState.SyncResult)
@@ -604,12 +607,12 @@ func TestNamespacedAppWithSecrets(t *testing.T) {
 _, err = fixture.RunCli("app", "patch-resource", ctx.AppQualifiedName(), "--resource-name", "test-secret",
 "--kind", "Secret", "--patch", `{"op": "add", "path": "/data", "value": "hello"}'`,
 "--patch-type", "application/json-patch+json")
-require.ErrorContains(t, err, fmt.Sprintf("failed to patch Secret %s/test-secret", fixture.DeploymentNamespace()))
+require.ErrorContains(t, err, fmt.Sprintf("failed to patch Secret %s/test-secret", ctx.DeploymentNamespace()))
 assert.NotContains(t, err.Error(), "username")
 assert.NotContains(t, err.Error(), "password")
 
 // patch secret and make sure app is out of sync and diff detects the change
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Secrets(fixture.DeploymentNamespace()).Patch(t.Context(),
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Secrets(ctx.DeploymentNamespace()).Patch(t.Context(),
 "test-secret", types.JSONPatchType, []byte(`[
 {"op": "remove", "path": "/data/username"},
 {"op": "add", "path": "/stringData", "value": {"password": "foo"}}
@@ -673,7 +676,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
 Expect(SyncStatusIs(SyncStatusCodeSynced)).
 And(func(_ *Application) {
 // Patch deployment
-_, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
+_, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
 "guestbook-ui", types.JSONPatchType, []byte(`[{ "op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "test" }]`), metav1.PatchOptions{})
 require.NoError(t, err)
 }).
@@ -684,7 +687,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
 And(func(_ *Application) {
 diffOutput, err := fixture.RunCli("app", "diff", ctx.AppQualifiedName(), "--local-repo-root", ".", "--local", "testdata/guestbook")
 require.Error(t, err)
-assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", fixture.DeploymentNamespace()))
+assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", ctx.DeploymentNamespace()))
 }).
 Given().
 ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {
@@ -713,7 +716,7 @@ func TestNamespacedResourceDiffing(t *testing.T) {
 }]`).
 Sync().
 And(func() {
-output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", fixture.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
+output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", ctx.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
 require.NoError(t, err)
 assert.Contains(t, output, "serverside-applied")
 }).
@@ -740,12 +743,12 @@ func TestNamespacedResourceDiffing(t *testing.T) {
 "value": { "syncOptions": ["RespectIgnoreDifferences=true"] }
 }]`).
 And(func() {
-deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
+deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
 require.NoError(t, err)
 assert.Equal(t, int32(3), *deployment.Spec.RevisionHistoryLimit)
 }).
 And(func() {
-output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", fixture.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
+output, err := fixture.RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", ctx.DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
 require.NoError(t, err)
 assert.Contains(t, output, "serverside-applied")
 }).
@@ -754,13 +757,13 @@ func TestNamespacedResourceDiffing(t *testing.T) {
 Then().
 Expect(SyncStatusIs(SyncStatusCodeSynced)).
 And(func(_ *Application) {
-deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
+deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
 require.NoError(t, err)
 assert.Equal(t, int32(1), *deployment.Spec.RevisionHistoryLimit)
 }).
 When().Sync().Then().Expect(SyncStatusIs(SyncStatusCodeSynced)).
 And(func(_ *Application) {
-deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
+deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
 require.NoError(t, err)
 assert.Equal(t, int32(1), *deployment.Spec.RevisionHistoryLimit)
 })
@@ -782,7 +785,7 @@ func TestNamespacedKnownTypesInCRDDiffing(t *testing.T) {
 Expect(OperationPhaseIs(OperationSucceeded)).Expect(SyncStatusIs(SyncStatusCodeSynced)).
 When().
 And(func() {
-dummyResIf := fixture.DynamicClientset.Resource(dummiesGVR).Namespace(fixture.DeploymentNamespace())
+dummyResIf := fixture.DynamicClientset.Resource(dummiesGVR).Namespace(ctx.DeploymentNamespace())
 patchData := []byte(`{"spec":{"cpu": "2"}}`)
 errors.NewHandler(t).FailOnErr(dummyResIf.Patch(t.Context(), "dummy-crd-instance", types.MergePatchType, patchData, metav1.PatchOptions{}))
 }).Refresh(RefreshTypeNormal).
@@ -869,7 +872,7 @@ func TestNamespacedResourceAction(t *testing.T) {
 Group: ptr.To("apps"),
 Kind: ptr.To("Deployment"),
 Version: ptr.To("v1"),
-Namespace: ptr.To(fixture.DeploymentNamespace()),
+Namespace: ptr.To(ctx.DeploymentNamespace()),
 ResourceName: ptr.To("guestbook-ui"),
 })
 require.NoError(t, err)
@@ -880,14 +883,14 @@ func TestNamespacedResourceAction(t *testing.T) {
 Group: ptr.To("apps"),
 Kind: ptr.To("Deployment"),
 Version: ptr.To("v1"),
-Namespace: ptr.To(fixture.DeploymentNamespace()),
+Namespace: ptr.To(ctx.DeploymentNamespace()),
 ResourceName: ptr.To("guestbook-ui"),
 Action: ptr.To("sample"),
 AppNamespace: ptr.To(fixture.AppNamespace()),
 })
 require.NoError(t, err)
 
-deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
+deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
 require.NoError(t, err)
 
 assert.Equal(t, "test", deployment.Labels["sample"])
@@ -1023,7 +1026,7 @@ func TestNamespacedSyncAsync(t *testing.T) {
 }
 
 // assertResourceActions verifies if view/modify resource actions are successful/failing for given application
-func assertNSResourceActions(t *testing.T, appName string, successful bool) {
+func assertNSResourceActions(t *testing.T, appName string, deploymentNamespace string, successful bool) {
 t.Helper()
 assertError := func(err error, message string) {
 if successful {
@@ -1036,7 +1039,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 closer, cdClient := fixture.ArgoCDClientset.NewApplicationClientOrDie()
 defer utilio.Close(closer)
 
-deploymentResource, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
+deploymentResource, err := fixture.KubeClientset.AppsV1().Deployments(deploymentNamespace).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
 require.NoError(t, err)
 
 logs, err := cdClient.PodLogs(t.Context(), &applicationpkg.ApplicationPodLogsQuery{
@@ -1044,7 +1047,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 Kind: ptr.To("Deployment"),
 Name: &appName,
 AppNamespace: ptr.To(fixture.AppNamespace()),
-Namespace: ptr.To(fixture.DeploymentNamespace()),
+Namespace: ptr.To(deploymentNamespace),
 Container: ptr.To(""),
 SinceSeconds: ptr.To(int64(0)),
 TailLines: ptr.To(int64(0)),
@@ -1060,7 +1063,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 Name: &appName,
 AppNamespace: ptr.To(fixture.AppNamespace()),
 ResourceName: ptr.To("guestbook-ui"),
-ResourceNamespace: ptr.To(fixture.DeploymentNamespace()),
+ResourceNamespace: ptr.To(deploymentNamespace),
 ResourceUID: ptr.To(string(deploymentResource.UID)),
 })
 assertError(err, fmt.Sprintf("%s not found as part of application %s", "guestbook-ui", appName))
@@ -1069,7 +1072,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 Name: &appName,
 AppNamespace: ptr.To(fixture.AppNamespace()),
 ResourceName: ptr.To("guestbook-ui"),
-Namespace: ptr.To(fixture.DeploymentNamespace()),
+Namespace: ptr.To(deploymentNamespace),
 Version: ptr.To("v1"),
 Group: ptr.To("apps"),
 Kind: ptr.To("Deployment"),
@@ -1080,7 +1083,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 Name: &appName,
 AppNamespace: ptr.To(fixture.AppNamespace()),
 ResourceName: ptr.To("guestbook-ui"),
-Namespace: ptr.To(fixture.DeploymentNamespace()),
+Namespace: ptr.To(deploymentNamespace),
 Version: ptr.To("v1"),
 Group: ptr.To("apps"),
 Kind: ptr.To("Deployment"),
@@ -1092,7 +1095,7 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 Name: &appName,
 AppNamespace: ptr.To(fixture.AppNamespace()),
 ResourceName: ptr.To("guestbook-ui"),
-Namespace: ptr.To(fixture.DeploymentNamespace()),
+Namespace: ptr.To(deploymentNamespace),
 Version: ptr.To("v1"),
 Group: ptr.To("apps"),
 Kind: ptr.To("Deployment"),
@@ -1102,22 +1105,20 @@ func assertNSResourceActions(t *testing.T, appName string, successful bool) {
 
 func TestNamespacedPermissions(t *testing.T) {
 appCtx := Given(t)
-projName := "argo-project"
-projActions := projectFixture.
-Given(t).
-Name(projName).
+projCtx := projectFixture.GivenWithSameState(appCtx)
+projActions := projCtx.
 SourceNamespaces([]string{fixture.AppNamespace()}).
 When().
 Create()
 
-sourceError := fmt.Sprintf("application repo %s is not permitted in project 'argo-project'", fixture.RepoURL(fixture.RepoURLTypeFile))
-destinationError := fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project 'argo-project'", KubernetesInternalAPIServerAddr, fixture.DeploymentNamespace())
+sourceError := fmt.Sprintf("application repo %s is not permitted in project '%s'", fixture.RepoURL(fixture.RepoURLTypeFile), projCtx.GetName())
+destinationError := fmt.Sprintf("application destination server '%s' and namespace '%s' do not match any of the allowed destinations in project '%s'", KubernetesInternalAPIServerAddr, appCtx.DeploymentNamespace(), projCtx.GetName())
 
 appCtx.
 Path("guestbook-logs").
 SetTrackingMethod("annotation").
 SetAppNamespace(fixture.AppNamespace()).
-Project(projName).
+Project(projCtx.GetName()).
 When().
 IgnoreErrors().
 // ensure app is not created if project permissions are missing
@@ -1138,7 +1139,7 @@ func TestNamespacedPermissions(t *testing.T) {
 Then().
 // make sure application resource actions are successful
 And(func(app *Application) {
-assertNSResourceActions(t, app.Name, true)
+assertNSResourceActions(t, app.Name, appCtx.DeploymentNamespace(), true)
 }).
 When().
 // remove projet permissions and "refresh" app
@@ -1175,29 +1176,27 @@ func TestNamespacedPermissions(t *testing.T) {
 Then().
 // make sure application resource actions are failing
 And(func(app *Application) {
-assertNSResourceActions(t, app.Name, false)
+assertNSResourceActions(t, app.Name, appCtx.DeploymentNamespace(), false)
 })
 }
 
 func TestNamespacedPermissionWithScopedRepo(t *testing.T) {
-projName := "argo-project"
-fixture.EnsureCleanState(t)
-projectFixture.
-Given(t).
-Name(projName).
+ctx := Given(t)
+projCtx := projectFixture.GivenWithSameState(ctx)
+projCtx.
 SourceNamespaces([]string{fixture.AppNamespace()}).
 Destination("*,*").
 When().
 Create()
 
-repoFixture.GivenWithSameState(t).
+repoFixture.GivenWithSameState(ctx).
 When().
 Path(fixture.RepoURL(fixture.RepoURLTypeFile)).
-Project(projName).
+Project(projCtx.GetName()).
 Create()
 
-GivenWithSameState(t).
-Project(projName).
+GivenWithSameState(ctx).
+Project(projCtx.GetName()).
 RepoURLType(fixture.RepoURLTypeFile).
 Path("two-nice-pods").
 SetTrackingMethod("annotation").
@@ -1221,22 +1220,19 @@
 }
 
 func TestNamespacedPermissionDeniedWithScopedRepo(t *testing.T) {
-projName := "argo-project"
-projectFixture.
-Given(t).
-Name(projName).
-Destination("*,*").
+ctx := projectFixture.Given(t)
+ctx.Destination("*,*").
 SourceNamespaces([]string{fixture.AppNamespace()}).
 When().
 Create()
 
-repoFixture.GivenWithSameState(t).
+repoFixture.GivenWithSameState(ctx).
 When().
 Path(fixture.RepoURL(fixture.RepoURLTypeFile)).
 Create()
 
-GivenWithSameState(t).
-Project(projName).
+GivenWithSameState(ctx).
+Project(ctx.GetName()).
 RepoURLType(fixture.RepoURLTypeFile).
 SetTrackingMethod("annotation").
 SetAppNamespace(fixture.AppNamespace()).
@@ -1409,7 +1405,8 @@ func TestNamespacedRevisionHistoryLimit(t *testing.T) {
 
 func TestNamespacedOrphanedResource(t *testing.T) {
 fixture.SkipOnEnv(t, "OPENSHIFT")
-Given(t).
+ctx := Given(t)
+ctx.
 ProjectSpec(AppProjectSpec{
 SourceRepos: []string{"*"},
 Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
@@ -1427,7 +1424,7 @@ func TestNamespacedOrphanedResource(t *testing.T) {
 Expect(NoConditions()).
 When().
 And(func() {
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(fixture.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(ctx.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "orphaned-configmap",
 },
@@ -1513,7 +1510,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
 ObjectMeta: metav1.ObjectMeta{
 Name: "sample-ingress",
 Annotations: map[string]string{
-common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:networking/Ingress:%s/sample-ingress", fixture.AppNamespace(), ctx.AppName(), fixture.DeploymentNamespace()),
+common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:networking/Ingress:%s/sample-ingress", fixture.AppNamespace(), ctx.AppName(), ctx.DeploymentNamespace()),
 },
 },
 Spec: networkingv1.IngressSpec{
@@ -1544,7 +1541,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
 ObjectMeta: metav1.ObjectMeta{
 Name: "guestbook-ui",
 Annotations: map[string]string{
-common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:Service:%s/guesbook-ui", fixture.TestNamespace(), ctx.AppQualifiedName(), fixture.DeploymentNamespace()),
+common.AnnotationKeyAppInstance: fmt.Sprintf("%s_%s:Service:%s/guesbook-ui", fixture.TestNamespace(), ctx.AppQualifiedName(), ctx.DeploymentNamespace()),
 },
 },
 Spec: corev1.ServiceSpec{
@@ -1560,7 +1557,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
 
 ctx.ProjectSpec(AppProjectSpec{
 SourceRepos: []string{"*"},
-Destinations: []ApplicationDestination{{Namespace: fixture.DeploymentNamespace(), Server: "*"}},
+Destinations: []ApplicationDestination{{Namespace: ctx.DeploymentNamespace(), Server: "*"}},
 SourceNamespaces: []string{fixture.AppNamespace()},
 NamespaceResourceBlacklist: []metav1.GroupKind{
 {Group: "", Kind: "Service"},
@@ -1568,7 +1565,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
 }).
 And(func() {
 errors.NewHandler(t).FailOnErr(fixture.KubeClientset.NetworkingV1().Ingresses(fixture.TestNamespace()).Create(t.Context(), ingress, metav1.CreateOptions{}))
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(fixture.DeploymentNamespace()).Create(t.Context(), svc, metav1.CreateOptions{}))
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(ctx.DeploymentNamespace()).Create(t.Context(), svc, metav1.CreateOptions{}))
 }).
 Path(guestbookPath).
 When().
@@ -1594,7 +1591,7 @@ func TestNamespacedNotPermittedResources(t *testing.T) {
 
 // Make sure prohibited resources are not deleted during application deletion
 errors.NewHandler(t).FailOnErr(fixture.KubeClientset.NetworkingV1().Ingresses(fixture.TestNamespace()).Get(t.Context(), "sample-ingress", metav1.GetOptions{}))
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{}))
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().Services(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{}))
 }
 
 func TestNamespacedSyncWithInfos(t *testing.T) {
@@ -1694,7 +1691,8 @@ func TestNamespacedCreateAppWithNoNameSpaceWhenRequired2(t *testing.T) {
 
 func TestNamespacedListResource(t *testing.T) {
 fixture.SkipOnEnv(t, "OPENSHIFT")
-Given(t).
+ctx := Given(t)
+ctx.
 SetAppNamespace(fixture.AppNamespace()).
 SetTrackingMethod("annotation").
 ProjectSpec(AppProjectSpec{
@@ -1712,7 +1710,7 @@ func TestNamespacedListResource(t *testing.T) {
 Expect(NoConditions()).
 When().
 And(func() {
-errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(fixture.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
+errors.NewHandler(t).FailOnErr(fixture.KubeClientset.CoreV1().ConfigMaps(ctx.DeploymentNamespace()).Create(t.Context(), &corev1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "orphaned-configmap",
 },
@@ -1970,7 +1968,7 @@ metadata:
 labels:
 test: "true"
 annotations:
-something: "whatevs"
+something: "whatevs"
 `
 s := fmt.Sprintf(existingNs, updatedNamespace)
 
@@ -2087,7 +2085,8 @@ func TestNamespacedFailedSyncWithRetry(t *testing.T) {
 }
 
 func TestNamespacedCreateDisableValidation(t *testing.T) {
-Given(t).
+ctx := Given(t)
+ctx.
 SetAppNamespace(fixture.AppNamespace()).
 SetTrackingMethod("annotation").
 Path("baddir").
@@ -2096,7 +2095,7 @@ func TestNamespacedCreateDisableValidation(t *testing.T) {
 Then().
 And(func(app *Application) {
 _, err := fixture.RunCli("app", "create", app.QualifiedName(), "--upsert", "--validate=false", "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
-"--path", "baddir2", "--project", app.Spec.Project, "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
+"--path", "baddir2", "--project", app.Spec.Project, "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", ctx.DeploymentNamespace())
 require.NoError(t, err)
 }).
 When().
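
A second isolation trick appears in the TestNamespacedAppLabels hunks above: the fixed label "foo=bar" is replaced by one derived from the context's short ID, so `argocd app list -l <label>` can only ever match apps created by this test run. A sketch of the idea under the same assumed fixture shape (ShortID here is a hypothetical helper mirroring the ctx.ShortID() call in the diff):

// ShortID exposes the context's random suffix.
func (s *testState) ShortID() string { return s.id }

// uniqueSelector builds a per-test label selector such as "id=3fa2",
// replacing the shared "foo=bar" that could also match leftovers
// from other tests running against the same cluster.
func uniqueSelector(ctx *testState) string {
	return "id=" + ctx.ShortID()
}
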
File diff suppressed because it is too large
@@ -28,12 +28,12 @@ func TestMultiSourceAppCreation(t *testing.T) {
 CreateMultiSourceAppFromFile().
 Then().
 And(func(app *Application) {
-assert.Equal(t, Name(), app.Name)
+assert.Equal(t, ctx.GetName(), app.Name)
 for i, source := range app.Spec.GetSources() {
 assert.Equal(t, sources[i].RepoURL, source.RepoURL)
 assert.Equal(t, sources[i].Path, source.Path)
 }
-assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
 }).
 Expect(Event(EventReasonResourceCreated, "create")).
@@ -41,7 +41,7 @@ func TestMultiSourceAppCreation(t *testing.T) {
 // app should be listed
 output, err := RunCli("app", "list")
 require.NoError(t, err)
-assert.Contains(t, output, Name())
+assert.Contains(t, output, ctx.GetName())
 }).
 Expect(Success("")).
 Given().Timeout(60).
@@ -83,12 +83,12 @@ func TestMultiSourceAppWithHelmExternalValueFiles(t *testing.T) {
 CreateMultiSourceAppFromFile().
 Then().
 And(func(app *Application) {
-assert.Equal(t, Name(), app.Name)
+assert.Equal(t, ctx.GetName(), app.Name)
 for i, source := range app.Spec.GetSources() {
 assert.Equal(t, sources[i].RepoURL, source.RepoURL)
 assert.Equal(t, sources[i].Path, source.Path)
 }
-assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
 }).
 Expect(Event(EventReasonResourceCreated, "create")).
@@ -96,7 +96,7 @@ func TestMultiSourceAppWithHelmExternalValueFiles(t *testing.T) {
 // app should be listed
 output, err := RunCli("app", "list")
 require.NoError(t, err)
-assert.Contains(t, output, Name())
+assert.Contains(t, output, ctx.GetName())
 }).
 Expect(Success("")).
 Given().Timeout(60).
@@ -111,7 +111,7 @@ func TestMultiSourceAppWithHelmExternalValueFiles(t *testing.T) {
 assert.Equal(t, SyncStatusCodeSynced, statusByName["guestbook-ui"])
 
 // Confirm that the deployment has 3 replicas.
-output, err := Run("", "kubectl", "get", "deployment", "guestbook-ui", "-n", DeploymentNamespace(), "-o", "jsonpath={.spec.replicas}")
+output, err := Run("", "kubectl", "get", "deployment", "guestbook-ui", "-n", ctx.DeploymentNamespace(), "-o", "jsonpath={.spec.replicas}")
 require.NoError(t, err)
 assert.Equal(t, "3", output, "Expected 3 replicas for the helm-guestbook deployment")
 })
@@ -135,12 +135,12 @@ func TestMultiSourceAppWithSourceOverride(t *testing.T) {
 CreateMultiSourceAppFromFile().
 Then().
 And(func(app *Application) {
-assert.Equal(t, Name(), app.Name)
+assert.Equal(t, ctx.GetName(), app.Name)
 for i, source := range app.Spec.GetSources() {
 assert.Equal(t, sources[i].RepoURL, source.RepoURL)
 assert.Equal(t, sources[i].Path, source.Path)
 }
-assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
 }).
 Expect(Event(EventReasonResourceCreated, "create")).
@@ -148,7 +148,7 @@ func TestMultiSourceAppWithSourceOverride(t *testing.T) {
 // app should be listed
 output, err := RunCli("app", "list")
 require.NoError(t, err)
-assert.Contains(t, output, Name())
+assert.Contains(t, output, ctx.GetName())
 }).
 Expect(Success("")).
 Given().Timeout(60).
@@ -166,7 +166,7 @@ func TestMultiSourceAppWithSourceOverride(t *testing.T) {
 assert.Equal(t, SyncStatusCodeSynced, statusByName["guestbook-ui"])
 
 // check if label was added to the pod to make sure resource was taken from the later source
-output, err := Run("", "kubectl", "describe", "pods", "pod-1", "-n", DeploymentNamespace())
+output, err := Run("", "kubectl", "describe", "pods", "pod-1", "-n", ctx.DeploymentNamespace())
 require.NoError(t, err)
 assert.Contains(t, output, "foo=bar")
 })
@@ -189,19 +189,19 @@ func TestMultiSourceAppWithSourceName(t *testing.T) {
 CreateMultiSourceAppFromFile().
 Then().
 And(func(app *Application) {
-assert.Equal(t, Name(), app.Name)
+assert.Equal(t, ctx.GetName(), app.Name)
 for i, source := range app.Spec.GetSources() {
 assert.Equal(t, sources[i].RepoURL, source.RepoURL)
 assert.Equal(t, sources[i].Path, source.Path)
 assert.Equal(t, sources[i].Name, source.Name)
 }
-assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
 }).
 Expect(Event(EventReasonResourceCreated, "create")).
 And(func(_ *Application) {
 // we remove the first source
-output, err := RunCli("app", "remove-source", Name(), "--source-name", sources[0].Name)
+output, err := RunCli("app", "remove-source", ctx.GetName(), "--source-name", sources[0].Name)
 require.NoError(t, err)
 assert.Contains(t, output, "updated successfully")
 }).
@@ -209,7 +209,7 @@ func TestMultiSourceAppWithSourceName(t *testing.T) {
 And(func(app *Application) {
 assert.Len(t, app.Spec.GetSources(), 1)
 // we add a source
-output, err := RunCli("app", "add-source", Name(), "--source-name", sources[0].Name, "--repo", RepoURL(RepoURLTypeFile), "--path", guestbookPath)
+output, err := RunCli("app", "add-source", ctx.GetName(), "--source-name", sources[0].Name, "--repo", RepoURL(RepoURLTypeFile), "--path", guestbookPath)
 require.NoError(t, err)
 assert.Contains(t, output, "updated successfully")
 }).
@@ -251,18 +251,18 @@ func TestMultiSourceAppSetWithSourceName(t *testing.T) {
 CreateMultiSourceAppFromFile().
 Then().
 And(func(app *Application) {
-assert.Equal(t, Name(), app.Name)
+assert.Equal(t, ctx.GetName(), app.Name)
 for i, source := range app.Spec.GetSources() {
 assert.Equal(t, sources[i].RepoURL, source.RepoURL)
 assert.Equal(t, sources[i].Path, source.Path)
 assert.Equal(t, sources[i].Name, source.Name)
 }
-assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
+assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
 assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
 }).
 Expect(Event(EventReasonResourceCreated, "create")).
 And(func(_ *Application) {
-_, err := RunCli("app", "set", Name(), "--source-name", sources[1].Name, "--path", "deployment")
+_, err := RunCli("app", "set", ctx.GetName(), "--source-name", sources[1].Name, "--path", "deployment")
 require.NoError(t, err)
 }).
 Expect(Success("")).
@@ -289,11 +289,11 @@ func TestMultiSourceApptErrorWhenSourceNameAndSourcePosition(t *testing.T) {
 Then().
 Expect(Event(EventReasonResourceCreated, "create")).
 And(func(_ *Application) {
-_, err := RunCli("app", "get", Name(), "--source-name", sources[1].Name, "--source-position", "1")
|
||||
_, err := RunCli("app", "get", ctx.GetName(), "--source-name", sources[1].Name, "--source-position", "1")
|
||||
assert.ErrorContains(t, err, "Only one of source-position and source-name can be specified.")
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
_, err := RunCli("app", "manifests", Name(), "--revisions", "0.0.2", "--source-names", sources[0].Name, "--revisions", "0.0.2", "--source-positions", "1")
|
||||
_, err := RunCli("app", "manifests", ctx.GetName(), "--revisions", "0.0.2", "--source-names", sources[0].Name, "--revisions", "0.0.2", "--source-positions", "1")
|
||||
assert.ErrorContains(t, err, "Only one of source-positions and source-names can be specified.")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestAppCreationInOtherNamespace(t *testing.T) {
|
||||
assert.Equal(t, AppNamespace(), app.Namespace)
|
||||
assert.Equal(t, RepoURL(RepoURLTypeFile), app.Spec.GetSource().RepoURL)
|
||||
assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
|
||||
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
|
||||
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
|
||||
}).
|
||||
Expect(NamespacedEvent(ctx.AppNamespace(), EventReasonResourceCreated, "create")).
|
||||
|
||||
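The hunks above all apply one refactor: package-level fixture helpers such as Name() and DeploymentNamespace() are replaced by accessors on the per-test context, ctx.GetName() and ctx.DeploymentNamespace(). A minimal sketch of the pattern follows; the assertions are taken from the hunks above, while the surrounding setup is illustrative only:

// Before: package-level helpers read fixture state shared across the package.
assert.Equal(t, Name(), app.Name)
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)

// After: the same values come from the context returned by Given(t),
// so each test reads state scoped to its own fixture instance.
ctx := Given(t)
assert.Equal(t, ctx.GetName(), app.Name)
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
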
@@ -69,9 +69,6 @@ func TestSimpleGitDirectoryGenerator(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -132,7 +129,7 @@ func TestSimpleGitDirectoryGenerator(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -178,9 +175,6 @@ func TestSimpleGitDirectoryGeneratorGoTemplate(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -242,7 +236,7 @@ func TestSimpleGitDirectoryGeneratorGoTemplate(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -307,9 +301,6 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -342,7 +333,7 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
}).
Then().Expect(ApplicationsDoNotExist(expectedApps)).
// verify the ApplicationSet error status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
When().
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
@@ -409,9 +400,6 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
IgnoreErrors().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -447,7 +435,7 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
},
}).Then().
// verify the ApplicationSet error status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
Expect(ApplicationsDoNotExist(expectedApps)).
When().
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
@@ -492,9 +480,6 @@ func TestSimpleGitFilesGenerator(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
@@ -555,7 +540,7 @@ func TestSimpleGitFilesGenerator(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -621,9 +606,6 @@ func TestSimpleGitFilesGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
@@ -655,7 +637,7 @@ func TestSimpleGitFilesGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
},
}).Then().Expect(ApplicationsDoNotExist(expectedApps)).
// verify the ApplicationSet error status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
When().
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
@@ -722,9 +704,6 @@ func TestSimpleGitFilesGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
IgnoreErrors().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
@@ -756,7 +735,7 @@ func TestSimpleGitFilesGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
},
}).Then().
// verify the ApplicationSet error status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", expectedConditionsParamsError)).
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
Expect(ApplicationsDoNotExist(expectedApps)).
When().
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
@@ -801,9 +780,6 @@ func TestSimpleGitFilesGeneratorGoTemplate(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -865,7 +841,7 @@ func TestSimpleGitFilesGeneratorGoTemplate(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedAppsNewMetadata)).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-git-generator", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -878,9 +854,6 @@ func TestSimpleGitFilesPreserveResourcesOnDeletion(t *testing.T) {
CreateNamespace(utils.ApplicationsResourcesNamespace).
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster.name}}-guestbook"},
@@ -938,9 +911,6 @@ func TestSimpleGitFilesPreserveResourcesOnDeletionGoTemplate(t *testing.T) {
CreateNamespace(utils.ApplicationsResourcesNamespace).
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1032,9 +1002,6 @@ func TestGitGeneratorPrivateRepo(t *testing.T) {
// Create a GitGenerator-based ApplicationSet

Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -1108,9 +1075,6 @@ func TestGitGeneratorPrivateRepoGoTemplate(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1184,9 +1148,6 @@ func TestSimpleGitGeneratorPrivateRepoWithNoRepo(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -1249,20 +1210,15 @@ func TestSimpleGitGeneratorPrivateRepoWithMatchingProject(t *testing.T) {
}
}

ctx := Given(t)
expectedApps := []v1alpha1.Application{
generateExpectedApp("https-kustomize-base"),
}

var expectedAppsNewNamespace []v1alpha1.Application

Given(t).
HTTPSInsecureRepoURLAdded("default").
ctx.HTTPSInsecureRepoURLAdded("default").
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -1295,7 +1251,7 @@ func TestSimpleGitGeneratorPrivateRepoWithMatchingProject(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}

func TestSimpleGitGeneratorPrivateRepoWithMismatchingProject(t *testing.T) {
@@ -1336,9 +1292,6 @@ func TestSimpleGitGeneratorPrivateRepoWithMismatchingProject(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -1412,9 +1365,6 @@ func TestGitGeneratorPrivateRepoWithTemplatedProject(t *testing.T) {
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},
@@ -1499,9 +1449,6 @@ func TestGitGeneratorPrivateRepoWithTemplatedProjectAndProjectScopedRepo(t *test
When().
// Create a GitGenerator-based ApplicationSet
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-git-generator-private",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}"},

673
test/e2e/applicationset_progressive_sync_test.go
Normal file
@@ -0,0 +1,673 @@
package e2e

import (
"os"
"testing"
"time"

"github.com/stretchr/testify/require"

apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

"github.com/argoproj/argo-cd/v3/pkg/apis/application"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
. "github.com/argoproj/argo-cd/v3/test/e2e/fixture/applicationsets"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
TransitionTimeout = 60 * time.Second
)

func TestApplicationSetProgressiveSyncStep(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - env variable not set to enable progressive sync")
}
expectedDevApp := v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app1-dev",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "dev",
},
Finalizers: []string{
"resources-finalizer.argocd.argoproj.io",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
Path: "guestbook",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "app1",
},
},
}

expectedStageApp := v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app2-staging",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "staging",
},
Finalizers: []string{
"resources-finalizer.argocd.argoproj.io",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
Path: "guestbook",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "app2",
},
},
}
expectedProdApp := v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: application.ApplicationKind,
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "app3-prod",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "prod",
},
Finalizers: []string{
"resources-finalizer.argocd.argoproj.io",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
Path: "guestbook",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "app3",
},
},
}

Given(t).
When().
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "progressive-sync-apps",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
Name: "{{.name}}-{{.environment}}",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "{{.environment}}",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
Path: "guestbook",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "{{.name}}",
},
SyncPolicy: &v1alpha1.SyncPolicy{
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{
{Raw: []byte(`{"name": "app1", "environment": "dev"}`)},
{Raw: []byte(`{"name": "app2", "environment": "staging"}`)},
{Raw: []byte(`{"name": "app3", "environment": "prod"}`)},
},
},
},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: generateStandardRolloutSyncSteps(),
},
},
},
}).
Then().
And(func() {
t.Log("ApplicationSet created")
}).
Expect(ApplicationsExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp})).
And(func() {
t.Log("All applications exist")
}).
ExpectWithDuration(CheckApplicationInRightSteps("1", []string{"app1-dev"}), TransitionTimeout).
ExpectWithDuration(CheckApplicationInRightSteps("2", []string{"app2-staging"}), time.Second*5).
ExpectWithDuration(CheckApplicationInRightSteps("3", []string{"app3-prod"}), time.Second*5).
// cleanup
When().
Delete().
Then().
ExpectWithDuration(ApplicationsDoNotExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp}), time.Minute)
}

func TestProgressiveSyncHealthGating(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}
expectedDevApp := generateExpectedApp("prog-", "progressive-sync/", "dev", "dev")
expectedStageApp := generateExpectedApp("prog-", "progressive-sync/", "staging", "staging")
expectedProdApp := generateExpectedApp("prog-", "progressive-sync/", "prod", "prod")

expectedStatusWave1 := map[string]v1alpha1.ApplicationSetApplicationStatus{
"prog-dev": {
Application: "prog-dev",
Status: v1alpha1.ProgressiveSyncProgressing,
},
"prog-staging": {
Application: "prog-staging",
Status: v1alpha1.ProgressiveSyncWaiting,
},
"prog-prod": {
Application: "prog-prod",
Status: v1alpha1.ProgressiveSyncWaiting,
},
}

expectedStatusWave2 := map[string]v1alpha1.ApplicationSetApplicationStatus{
"prog-dev": {
Application: "prog-dev",
Status: v1alpha1.ProgressiveSyncHealthy,
},
"prog-staging": {
Application: "prog-staging",
Status: v1alpha1.ProgressiveSyncProgressing,
},
"prog-prod": {
Application: "prog-prod",
Status: v1alpha1.ProgressiveSyncWaiting,
},
}

expectedStatusWave3 := map[string]v1alpha1.ApplicationSetApplicationStatus{
"prog-dev": {
Application: "prog-dev",
Status: v1alpha1.ProgressiveSyncHealthy,
},
"prog-staging": {
Application: "prog-staging",
Status: v1alpha1.ProgressiveSyncHealthy,
},
"prog-prod": {
Application: "prog-prod",
Status: v1alpha1.ProgressiveSyncProgressing,
},
}

expectedAllHealthy := map[string]v1alpha1.ApplicationSetApplicationStatus{
"prog-dev": {
Application: "prog-dev",
Status: v1alpha1.ProgressiveSyncHealthy,
},
"prog-staging": {
Application: "prog-staging",
Status: v1alpha1.ProgressiveSyncHealthy,
},
"prog-prod": {
Application: "prog-prod",
Status: v1alpha1.ProgressiveSyncHealthy,
},
}

Given(t).
When().
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "progressive-sync-gating",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
Name: "prog-{{.environment}}",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "{{.environment}}",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
Path: "progressive-sync/{{.environment}}",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "prog-{{.environment}}",
},
SyncPolicy: &v1alpha1.SyncPolicy{
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{
{Raw: []byte(`{"environment": "dev"}`)},
{Raw: []byte(`{"environment": "staging"}`)},
{Raw: []byte(`{"environment": "prod"}`)},
},
},
},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: generateStandardRolloutSyncSteps(),
},
},
},
}).
Then().
Expect(ApplicationsExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp})).
And(func() {
t.Log("ApplicationSet created")
t.Log("Checking that the Dev app is stuck in Progressing (invalid image)")
t.Log("Verifying staging and prod are Waiting")
}).
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedStatusWave1), TransitionTimeout).
And(func() {
// Patch deployment to use valid image
fixture.Patch(t, "progressive-sync/dev/deployment.yaml", `[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "quay.io/argoprojlabs/argocd-e2e-container:0.1"}]`)
// Refresh the app to detect git changes
_, err := fixture.RunCli("app", "get", "prog-dev", "--refresh")
require.NoError(t, err)
t.Log("After patching image and refreshing, Dev app should progress to Healthy")
t.Log("Staging app should now be in Progressing, and prod is waiting")
}).
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedStatusWave2), TransitionTimeout).
And(func() {
// Patch deployment to use valid image
fixture.Patch(t, "progressive-sync/staging/deployment.yaml", `[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "quay.io/argoprojlabs/argocd-e2e-container:0.1"}]`)
// Refresh the app to detect git changes
_, err := fixture.RunCli("app", "get", "prog-staging", "--refresh")
require.NoError(t, err)
t.Log("Dev and staging are now Healthy")
t.Log("Checking that the Prod app is Progressing")
}).
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedStatusWave3), TransitionTimeout).
And(func() {
// Patch deployment to use valid image
fixture.Patch(t, "progressive-sync/prod/deployment.yaml", `[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "quay.io/argoprojlabs/argocd-e2e-container:0.1"}]`)
// Refresh the app to detect git changes
_, err := fixture.RunCli("app", "get", "prog-prod", "--refresh")
require.NoError(t, err)
}).
ExpectWithDuration(CheckProgressiveSyncStatusCodeOfApplications(expectedAllHealthy), TransitionTimeout).
And(func() {
t.Log("progressive sync verified")
t.Log("Dev progressed first")
t.Log("Staging waited until Dev was Healthy")
t.Log("Prod waited until Staging was Healthy")
}).
// Cleanup
When().
Delete().
Then().
ExpectWithDuration(ApplicationsDoNotExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp}), TransitionTimeout)
}

func TestNoApplicationStatusWhenNoSteps(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}

expectedConditions := []v1alpha1.ApplicationSetCondition{
{
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
Status: v1alpha1.ApplicationSetConditionStatusFalse,
Message: "All applications have been generated successfully",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
},
{
Type: v1alpha1.ApplicationSetConditionParametersGenerated,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
Message: "Successfully generated parameters for all Applications",
Reason: v1alpha1.ApplicationSetReasonParametersGenerated,
},
{
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
Status: v1alpha1.ApplicationSetConditionStatusTrue,
Message: "All applications have been generated successfully",
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
},
{
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
Status: v1alpha1.ApplicationSetConditionStatusFalse,
Message: "ApplicationSet Rollout has completed",
Reason: v1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
},
}

expectedApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev"),
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging"),
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod"),
}
Given(t).
When().
Create(appSetInvalidStepConfiguration).
Then().
Expect(ApplicationSetHasConditions(expectedConditions)). // TODO: when no steps created, condition should reflect that.
Expect(ApplicationSetDoesNotHaveApplicationStatus()).
// Cleanup
When().
Delete().
Then().
ExpectWithDuration(ApplicationsDoNotExist(expectedApps), TransitionTimeout)
}

func TestNoApplicationStatusWhenNoApplications(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}
expectedApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev"),
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging"),
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod"),
}
Given(t).
When().
Create(appSetWithEmptyGenerator).
Then().
Expect(ApplicationsDoNotExist(expectedApps)).
Expect(ApplicationSetDoesNotHaveApplicationStatus()).
// Cleanup
When().
Delete().
Then().
Expect(ApplicationsDoNotExist(expectedApps))
}

func TestProgressiveSyncMultipleAppsPerStep(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}
expectedApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "sketch", "dev"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "build", "dev"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "verify", "staging"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "validate", "staging"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "ship", "prod"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "run", "prod"),
}
Given(t).
When().
Create(appSetWithMultipleAppsInEachStep).
Then().
Expect(ApplicationsExist(expectedApps)).
Expect(CheckApplicationInRightSteps("1", []string{"prog-sketch", "prog-build"})).
Expect(CheckApplicationInRightSteps("2", []string{"prog-verify", "prog-validate"})).
Expect(CheckApplicationInRightSteps("3", []string{"prog-ship", "prog-run"})).
ExpectWithDuration(ApplicationSetHasApplicationStatus(6), TransitionTimeout).
// Cleanup
When().
Delete().
Then().
Expect(ApplicationsDoNotExist(expectedApps))
}

var appSetInvalidStepConfiguration = v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid-step-configuration",
},
TypeMeta: metav1.TypeMeta{
Kind: "ApplicationSet",
APIVersion: "argoproj.io/v1alpha1",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
Name: "prog-{{.environment}}",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "{{.environment}}",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
Path: "progressive-sync/{{.environment}}",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "prog-{{.environment}}",
},
SyncPolicy: &v1alpha1.SyncPolicy{
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{
{Raw: []byte(`{"environment": "dev"}`)},
{Raw: []byte(`{"environment": "staging"}`)},
{Raw: []byte(`{"environment": "prod"}`)},
},
},
},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
// Empty Steps with Rolling Sync shouldn't trigger
},
},
},
},
}

var appSetWithEmptyGenerator = v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "appset-empty-generator",
},
TypeMeta: metav1.TypeMeta{
Kind: "ApplicationSet",
APIVersion: "argoproj.io/v1alpha1",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
Name: "prog-{{.environment}}",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "{{.environment}}",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
Path: "progressive-sync/{{.environment}}",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "prog-{{.environment}}",
},
SyncPolicy: &v1alpha1.SyncPolicy{
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{
// Empty Generator
},
},
},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: generateStandardRolloutSyncSteps(),
},
},
},
}

var appSetWithMultipleAppsInEachStep = v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "progressive-sync-multi-apps",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
Name: "prog-{{.name}}",
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": "{{.environment}}",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
Path: "progressive-sync/multiple-apps-in-step/{{.environment}}/{{.name}}",
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: "prog-{{.name}}",
},
SyncPolicy: &v1alpha1.SyncPolicy{
SyncOptions: v1alpha1.SyncOptions{"CreateNamespace=true"},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{
{Raw: []byte(`{"environment": "dev", "name": "sketch"}`)},
{Raw: []byte(`{"environment": "dev", "name": "build"}`)},
{Raw: []byte(`{"environment": "staging", "name": "verify"}`)},
{Raw: []byte(`{"environment": "staging", "name": "validate"}`)},
{Raw: []byte(`{"environment": "prod", "name": "ship"}`)},
{Raw: []byte(`{"environment": "prod", "name": "run"}`)},
},
},
},
},
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: generateStandardRolloutSyncSteps(),
},
},
},
}

func generateExpectedApp(prefix string, path string, name string, envVar string) v1alpha1.Application {
return v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
APIVersion: "argoproj.io/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: prefix + name,
Namespace: fixture.TestNamespace(),
Labels: map[string]string{
"environment": envVar,
},
Finalizers: []string{
"resources-finalizer.argocd.argoproj.io",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: fixture.RepoURL(fixture.RepoURLTypeFile),
Path: path + name,
TargetRevision: "HEAD",
},
Destination: v1alpha1.ApplicationDestination{
Server: "https://kubernetes.default.svc",
Namespace: prefix + name,
},
},
}
}

func generateStandardRolloutSyncSteps() []v1alpha1.ApplicationSetRolloutStep {
return []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
{
Key: "environment",
Operator: "In",
Values: []string{"dev"},
},
},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
{
Key: "environment",
Operator: "In",
Values: []string{"staging"},
},
},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
{
Key: "environment",
Operator: "In",
Values: []string{"prod"},
},
},
},
}
}
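generateStandardRolloutSyncSteps above gives each environment label its own RollingSync wave: dev first, then staging, then prod. As a rough sketch of the wave semantics these tests rely on, the helper below maps an Application's labels to a 1-based step index; it assumes In is the only operator in play (as in these steps) and is illustrative, not the controller's actual matching code:

// waveFor returns the 1-based index of the first rollout step whose match
// expressions all select the given labels, or 0 if no step matches.
func waveFor(appLabels map[string]string, steps []v1alpha1.ApplicationSetRolloutStep) int {
	for i, step := range steps {
		matchesAll := len(step.MatchExpressions) > 0
		for _, expr := range step.MatchExpressions {
			matched := false
			if expr.Operator == "In" {
				for _, v := range expr.Values {
					if appLabels[expr.Key] == v {
						matched = true
						break
					}
				}
			}
			if !matched {
				matchesAll = false
				break
			}
		}
		if matchesAll {
			return i + 1
		}
	}
	return 0
}

With these steps, waveFor(map[string]string{"environment": "staging"}, generateStandardRolloutSyncSteps()) returns 2, which is the step index CheckApplicationInRightSteps("2", ...) asserts for the staging app.
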
@@ -15,13 +15,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

. "github.com/argoproj/argo-cd/v3/test/e2e/fixture/applicationsets"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture/applicationsets/utils"

@@ -82,39 +82,35 @@ func TestSimpleListGeneratorExternalNamespace(t *testing.T) {
// Create a ListGenerator-based ApplicationSet
When().
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
CreateNamespace(externalNamespace).Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator-external",
Namespace: externalNamespace,
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
Create(v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{.url}}",
Namespace: "guestbook",
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{.url}}",
Namespace: "guestbook",
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).

// Update the ApplicationSet template namespace, and verify it updates the Applications
When().
@@ -143,7 +139,7 @@ func TestSimpleListGeneratorExternalNamespace(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-list-generator-external", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -209,74 +205,72 @@ func TestSimpleListGeneratorExternalNamespaceNoConflict(t *testing.T) {
// Create a ListGenerator-based ApplicationSet
When().
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
CreateNamespace(externalNamespace2).Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator-external",
Namespace: externalNamespace2,
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: externalNamespace2,
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{.url}}",
Namespace: "guestbook",
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{.url}}",
Namespace: "guestbook",
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedAppExternalNamespace2})).
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedAppExternalNamespace2})).
When().
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
CreateNamespace(externalNamespace).Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator-external",
Namespace: externalNamespace,
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: externalNamespace,
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{.cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{.url}}",
Namespace: "guestbook",
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{.url}}",
Namespace: "guestbook",
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
When().
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
Then().
@@ -317,7 +311,7 @@ func TestSimpleListGeneratorExternalNamespaceNoConflict(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-list-generator-external", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).
When().
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
Then().
@@ -365,37 +359,35 @@ func TestSimpleListGenerator(t *testing.T) {

Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
When().
Create(v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{cluster}}-guestbook"},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
Source: &v1alpha1.ApplicationSource{
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
TargetRevision: "HEAD",
Path: "guestbook",
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{url}}",
Namespace: "guestbook",
},
},
Destination: v1alpha1.ApplicationDestination{
Server: "{{url}}",
Namespace: "guestbook",
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
Generators: []v1alpha1.ApplicationSetGenerator{
{
List: &v1alpha1.ListGenerator{
Elements: []apiextensionsv1.JSON{{
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
}},
},
},
},
},
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).

// Update the ApplicationSet template namespace, and verify it updates the Applications
When().
@@ -420,7 +412,7 @@ func TestSimpleListGenerator(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-list-generator", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -457,9 +449,6 @@ func TestSimpleListGeneratorGoTemplate(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -512,7 +501,7 @@ func TestSimpleListGeneratorGoTemplate(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("simple-list-generator", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -553,9 +542,6 @@ func TestRenderHelmValuesObject(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-values-object",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -651,9 +637,6 @@ func TestTemplatePatch(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "patch-template",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -714,7 +697,7 @@ func TestTemplatePatch(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("patch-template", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it deletes the Applications
When().
@@ -755,9 +738,6 @@ func TestUpdateHelmValuesObject(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-values-object-patch",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -791,7 +771,7 @@ func TestUpdateHelmValuesObject(t *testing.T) {
},
},
}).Then().
Expect(ApplicationSetHasConditions("test-values-object-patch", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).
When().
// Update the app spec with some new ValuesObject to force a merge
Update(func(as *v1alpha1.ApplicationSet) {
@@ -836,9 +816,6 @@ func TestSyncPolicyCreateUpdate(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "sync-policy-create-update",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -911,7 +888,7 @@ func TestSyncPolicyCreateUpdate(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("sync-policy-create-update", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it does not delete the Applications
// As policy is create-update, the AppSet controller will remove all generated Applications' ownerReferences when the AppSet is deleted
@@ -949,9 +926,6 @@ func TestSyncPolicyCreateDelete(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "sync-policy-create-delete",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1012,7 +986,7 @@ func TestSyncPolicyCreateDelete(t *testing.T) {
}).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("sync-policy-create-delete", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet
When().
@@ -1048,9 +1022,6 @@ func TestSyncPolicyCreateOnly(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "sync-policy-create-only",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1114,7 +1085,7 @@ func TestSyncPolicyCreateOnly(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewNamespace})).

// verify the ApplicationSet status conditions were set correctly
Expect(ApplicationSetHasConditions("sync-policy-create-only", ExpectedConditions)).
Expect(ApplicationSetHasConditions(ExpectedConditions)).

// Delete the ApplicationSet, and verify it does not delete the Applications
// As policy is create-only, the AppSet controller will remove all generated Applications' ownerReferences when the AppSet is deleted
@@ -1365,9 +1336,6 @@ func TestSimpleSCMProviderGenerator(t *testing.T) {
Given(t).
// Create an SCMProviderGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-scm-provider-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{ repository }}-guestbook"},
@@ -1440,9 +1408,6 @@ func TestSimpleSCMProviderGeneratorGoTemplate(t *testing.T) {
Given(t).
// Create an SCMProviderGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-scm-provider-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1507,12 +1472,9 @@ func TestSCMProviderGeneratorSCMProviderNotAllowed(t *testing.T) {
// Because you can't &"".
repoMatch := "argo-cd"

Given(t).
// Create an SCMProviderGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "scm-provider-generator-scm-provider-not-allowed",
},
ctx := Given(t)
// Create an SCMProviderGenerator-based ApplicationSet
ctx.When().Create(v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1549,7 +1511,7 @@ func TestSCMProviderGeneratorSCMProviderNotAllowed(t *testing.T) {
}).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp})).
And(func() {
// app should be listed
output, err := fixture.RunCli("appset", "get", "scm-provider-generator-scm-provider-not-allowed")
output, err := fixture.RunCli("appset", "get", ctx.GetName())
require.NoError(t, err)
assert.Contains(t, output, "scm provider not allowed")
})
@@ -1583,9 +1545,6 @@ func TestCustomApplicationFinalizers(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
@@ -1650,9 +1609,6 @@ func TestCustomApplicationFinalizersGoTemplate(t *testing.T) {
Given(t).
// Create a ListGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-list-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -1783,9 +1739,6 @@ func TestSimpleSCMProviderGeneratorTokenRefStrictOk(t *testing.T) {
}).
// Create an SCMProviderGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-scm-provider-generator-strict",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{ repository }}-guestbook"},
@@ -1869,29 +1822,26 @@ func TestSimpleSCMProviderGeneratorTokenRefStrictKo(t *testing.T) {
// Because you can't &"".
repoMatch := "argo-cd"

Given(t).
And(func() {
_, err := utils.GetE2EFixtureK8sClient(t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Create(t.Context(), &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: fixture.TestNamespace(),
Name: secretName,
Labels: map[string]string{
// Try to exfiltrate cluster secret
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
},
ctx := Given(t)
ctx.And(func() {
_, err := utils.GetE2EFixtureK8sClient(t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Create(t.Context(), &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: fixture.TestNamespace(),
Name: secretName,
Labels: map[string]string{
// Try to exfiltrate cluster secret
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
},
Data: map[string][]byte{
"hello": []byte("world"),
},
}, metav1.CreateOptions{})
},
Data: map[string][]byte{
"hello": []byte("world"),
},
}, metav1.CreateOptions{})

assert.NoError(t, err)
}).
assert.NoError(t, err)
}).
// Create an SCMProviderGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-scm-provider-generator-strict-ko",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{ repository }}-guestbook"},
@@ -1932,7 +1882,7 @@ func TestSimpleSCMProviderGeneratorTokenRefStrictKo(t *testing.T) {
When().
And(func() {
// app should be listed
output, err := fixture.RunCli("appset", "get", "simple-scm-provider-generator-strict-ko")
output, err := fixture.RunCli("appset", "get", ctx.GetName())
require.NoError(t, err)
assert.Contains(t, output, fmt.Sprintf("scm provider: error fetching Github token: secret %s/%s is not a valid SCM creds secret", fixture.TestNamespace(), secretName))
err2 := utils.GetE2EFixtureK8sClient(t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Delete(t.Context(), secretName, metav1.DeleteOptions{})
@@ -1978,9 +1928,6 @@ func TestSimplePullRequestGenerator(t *testing.T) {
Given(t).
// Create a PullRequestGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-pull-request-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "guestbook-{{ number }}"},
@@ -2057,9 +2004,6 @@ func TestSimplePullRequestGeneratorGoTemplate(t *testing.T) {
Given(t).
// Create a PullRequestGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-pull-request-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -2132,12 +2076,9 @@ func TestPullRequestGeneratorNotAllowedSCMProvider(t *testing.T) {
},
}

Given(t).
// Create a PullRequestGenerator-based ApplicationSet
When().Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "pull-request-generator-not-allowed-scm",
},
ctx := Given(t)
// Create a PullRequestGenerator-based ApplicationSet
ctx.When().Create(v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{
@@ -2179,7 +2120,7 @@ func TestPullRequestGeneratorNotAllowedSCMProvider(t *testing.T) {
}).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp})).
And(func() {
// app should be listed
output, err := fixture.RunCli("appset", "get", "pull-request-generator-not-allowed-scm")
output, err := fixture.RunCli("appset", "get", ctx.GetName())
require.NoError(t, err)
assert.Contains(t, output, "scm provider not allowed")
})

@@ -35,27 +35,27 @@ func createTestPlugin(t *testing.T, name, content string) string {

// TestCliAppCommand verifies the basic Argo CD CLI commands for app synchronization and listing.
func TestCliAppCommand(t *testing.T) {
Given(t).
Path("hook").
ctx := Given(t)
ctx.Path("hook").
When().
CreateApp().
And(func() {
output, err := RunCli("app", "sync", Name(), "--timeout", "90")
output, err := RunCli("app", "sync", ctx.AppName(), "--timeout", "90")
require.NoError(t, err)
vars := map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()}
vars := map[string]any{"Name": ctx.AppName(), "Namespace": ctx.DeploymentNamespace()}
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} pod Synced Progressing pod/pod created`, vars))
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} hook Succeeded Sync pod/hook created`, vars))
}).
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(HealthIs(health.HealthStatusHealthy)).
And(func(_ *Application) {
And(func(a *Application) {
output, err := RunCli("app", "list")
require.NoError(t, err)
expected := Tmpl(
t,
`{{.Name}} https://kubernetes.default.svc {{.Namespace}} default Synced Healthy Manual <none>`,
map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()})
map[string]any{"Name": a.GetName(), "Namespace": ctx.DeploymentNamespace()})
assert.Contains(t, NormalizeOutput(output), expected)
})
}
@@ -75,17 +75,18 @@ func TestNormalArgoCDCommandsExecuteOverPluginsWithSameName(t *testing.T) {
})
t.Setenv("PATH", filepath.Dir(pluginPath)+":"+origPath)

Given(t).
ctx := Given(t)
ctx.Path("hook").
Path("hook").
When().
CreateApp().
And(func() {
output, err := RunCli("app", "sync", Name(), "--timeout", "90")
output, err := RunCli("app", "sync", ctx.AppName(), "--timeout", "90")
require.NoError(t, err)

assert.NotContains(t, NormalizeOutput(output), "I am a plugin, not Argo CD!")

vars := map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()}
vars := map[string]any{"Name": ctx.AppName(), "Namespace": ctx.DeploymentNamespace()}
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} pod Synced Progressing pod/pod created`, vars))
assert.Contains(t, NormalizeOutput(output), Tmpl(t, `Pod {{.Namespace}} hook Succeeded Sync pod/hook created`, vars))
}).
@@ -101,7 +102,7 @@ func TestNormalArgoCDCommandsExecuteOverPluginsWithSameName(t *testing.T) {
expected := Tmpl(
t,
`{{.Name}} https://kubernetes.default.svc {{.Namespace}} default Synced Healthy Manual <none>`,
map[string]any{"Name": Name(), "Namespace": DeploymentNamespace()})
map[string]any{"Name": ctx.AppName(), "Namespace": ctx.DeploymentNamespace()})
assert.Contains(t, NormalizeOutput(output), expected)
})
}

@@ -48,11 +48,7 @@ func TestSimpleClusterGeneratorExternalNamespace(t *testing.T) {
When().
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
CreateNamespace(externalNamespace).
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -148,9 +144,6 @@ func TestSimpleClusterGenerator(t *testing.T) {
When().
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -273,9 +266,6 @@ func TestClusterGeneratorWithLocalCluster(t *testing.T) {
// Create a ClusterGenerator-based ApplicationSet
When().
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "in-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -364,9 +354,6 @@ func TestSimpleClusterGeneratorAddingCluster(t *testing.T) {
When().
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -447,9 +434,6 @@ func TestSimpleClusterGeneratorDeletingCluster(t *testing.T) {
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
CreateClusterSecret("my-secret2", "cluster2", "https://kubernetes.default.svc").
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -540,9 +524,6 @@ func TestClusterGeneratorWithFlatListMode(t *testing.T) {
When().
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
Template: v1alpha1.ApplicationSetTemplate{

@@ -25,20 +25,19 @@ func TestClusterList(t *testing.T) {
expected := fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
https://kubernetes.default.svc in-cluster %v Successful `, fixture.GetVersions(t).ServerVersion)

clusterFixture.
Given(t).
Project(fixture.ProjectName)
ctx := clusterFixture.Given(t)
ctx.Project(fixture.ProjectName)

// We need an application targeting the cluster, otherwise the test will
// fail if run in isolation.
app.GivenWithSameState(ctx).
Path(guestbookPath).
When().
CreateApp()

tries := 25
for i := 0; i <= tries; i++ {
clusterFixture.GivenWithSameState(t).
clusterFixture.GivenWithSameState(ctx).
When().
List().
Then().
@@ -56,9 +55,8 @@ https://kubernetes.default.svc in-cluster %v Successful `, fixtu
}

func TestClusterAdd(t *testing.T) {
clusterFixture.
Given(t).
Project(fixture.ProjectName).
ctx := clusterFixture.Given(t)
ctx.Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
When().
@@ -66,21 +64,20 @@ func TestClusterAdd(t *testing.T) {
List().
Then().
AndCLIOutput(func(output string, _ error) {
assert.Equal(t, fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
https://kubernetes.default.svc test-cluster-add %v Successful %s`, fixture.GetVersions(t).ServerVersion, fixture.ProjectName), output)
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
})
}

func TestClusterAddPermissionDenied(t *testing.T) {
accountFixture.Given(t).
Name("test").
ctx := accountFixture.Given(t)
ctx.Name("test").
When().
Create().
Login().
SetPermissions([]fixture.ACL{}, "org-admin")

clusterFixture.
GivenWithSameState(t).
GivenWithSameState(ctx).
Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
@@ -94,8 +91,8 @@ func TestClusterAddPermissionDenied(t *testing.T) {
}

func TestClusterAddAllowed(t *testing.T) {
accountFixture.Given(t).
Name("test").
accountCtx := accountFixture.Given(t)
accountCtx.Name("test").
When().
Create().
Login().
@@ -112,8 +109,8 @@ func TestClusterAddAllowed(t *testing.T) {
},
}, "org-admin")

clusterFixture.
GivenWithSameState(t).
ctx := clusterFixture.GivenWithSameState(accountCtx)
ctx.Project(fixture.ProjectName).
Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
@@ -122,14 +119,13 @@ func TestClusterAddAllowed(t *testing.T) {
List().
Then().
AndCLIOutput(func(output string, _ error) {
assert.Equal(t, fmt.Sprintf(`SERVER NAME VERSION STATUS MESSAGE PROJECT
https://kubernetes.default.svc test-cluster-add-allowed %v Successful argo-project`, fixture.GetVersions(t).ServerVersion), output)
assert.Contains(t, fixture.NormalizeOutput(output), fmt.Sprintf(`https://kubernetes.default.svc %s %v Successful %s`, ctx.GetName(), fixture.GetVersions(t).ServerVersion, fixture.ProjectName))
})
}

func TestClusterListDenied(t *testing.T) {
accountFixture.Given(t).
Name("test").
ctx := accountFixture.Given(t)
ctx.Name("test").
When().
Create().
Login().
@@ -142,7 +138,7 @@ func TestClusterListDenied(t *testing.T) {
}, "org-admin")

clusterFixture.
GivenWithSameState(t).
GivenWithSameState(ctx).
Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
@@ -156,17 +152,14 @@ func TestClusterListDenied(t *testing.T) {
}

func TestClusterSet(t *testing.T) {
fixture.EnsureCleanState(t)
defer fixture.RecordTestRun(t)
clusterFixture.
GivenWithSameState(t).
Project(fixture.ProjectName).
Name("in-cluster").
ctx := clusterFixture.Given(t)
ctx.Project(fixture.ProjectName).
Namespaces([]string{"namespace-edit-1", "namespace-edit-2"}).
Server(KubernetesInternalAPIServerAddr).
When().
Create().
SetNamespaces().
GetByName("in-cluster").
GetByName().
Then().
AndCLIOutput(func(output string, _ error) {
assert.Contains(t, output, "namespace-edit-1")
@@ -225,8 +218,8 @@ func TestClusterURLInRestAPI(t *testing.T) {
}

func TestClusterDeleteDenied(t *testing.T) {
accountFixture.Given(t).
Name("test").
ctx := accountFixture.Given(t)
ctx.Name("test").
When().
Create().
Login().
@@ -245,7 +238,7 @@ func TestClusterDeleteDenied(t *testing.T) {

// Attempt to remove cluster creds by name
clusterFixture.
GivenWithSameState(t).
GivenWithSameState(ctx).
Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
@@ -259,7 +252,7 @@ func TestClusterDeleteDenied(t *testing.T) {

// Attempt to remove cluster creds by server
clusterFixture.
GivenWithSameState(t).
GivenWithSameState(ctx).
Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
@@ -273,8 +266,8 @@ func TestClusterDeleteDenied(t *testing.T) {
}

func TestClusterDelete(t *testing.T) {
accountFixture.Given(t).
Name("default").
ctx := clusterFixture.Given(t)
accountFixture.GivenWithSameState(ctx).
When().
Create().
Login().
@@ -296,14 +289,18 @@ func TestClusterDelete(t *testing.T) {
},
}, "org-admin")

clstAction := clusterFixture.
GivenWithSameState(t).
Name("default").
clstAction := ctx.
Project(fixture.ProjectName).
Upsert(true).
Server(KubernetesInternalAPIServerAddr).
When().
CreateWithRBAC()
clstAction.
Then().
Expect().
AndCLIOutput(func(_ string, err error) {
assert.NoError(t, err)
})

// Check that RBAC is created
_, err := fixture.Run("", "kubectl", "get", "serviceaccount", "argocd-manager", "-n", "kube-system")
@@ -318,7 +315,7 @@ func TestClusterDelete(t *testing.T) {
clstAction.DeleteByName().
Then().
AndCLIOutput(func(output string, _ error) {
assert.Equal(t, "Cluster 'default' removed", output)
assert.Equal(t, fmt.Sprintf("Cluster '%s' removed", ctx.GetName()), output)
})

// Check that RBAC is removed after delete

@@ -61,12 +61,8 @@ func TestSimpleClusterDecisionResourceGeneratorExternalNamespace(t *testing.T) {
CreatePlacementDecisionConfigMap("my-configmap").
CreatePlacementDecision("my-placementdecision").
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
CreateNamespace(externalNamespace).
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace).
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -170,9 +166,6 @@ func TestSimpleClusterDecisionResourceGenerator(t *testing.T) {
CreatePlacementDecision("my-placementdecision").
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -281,9 +274,6 @@ func TestSimpleClusterDecisionResourceGeneratorAddingCluster(t *testing.T) {
CreatePlacementDecision("my-placementdecision").
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -377,9 +367,6 @@ func TestSimpleClusterDecisionResourceGeneratorDeletingClusterSecret(t *testing.
CreatePlacementDecision("my-placementdecision").
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},
@@ -481,9 +468,6 @@ func TestSimpleClusterDecisionResourceGeneratorDeletingClusterFromResource(t *te
CreatePlacementDecision("my-placementdecision").
StatusUpdatePlacementDecision("my-placementdecision", clusterList).
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "simple-cluster-generator",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-guestbook"},

@@ -23,7 +23,8 @@ import (
func TestCrossNamespaceOwnership(t *testing.T) {
var clusterRoleUID string

Given(t).
ctx := Given(t)
ctx.
Path("cross-namespace-ownership").
When().
CreateApp().
@@ -54,14 +55,14 @@ metadata:
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]`, DeploymentNamespace(), clusterRoleUID)
verbs: ["get", "list"]`, ctx.DeploymentNamespace(), clusterRoleUID)

_, err := Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml))
require.NoError(t, err)
t.Logf("Created Role in app namespace: %s", DeploymentNamespace())
t.Logf("Created Role in app namespace: %s", ctx.DeploymentNamespace())

// Create another namespace for cross-namespace testing
otherNamespace := DeploymentNamespace() + "-other"
otherNamespace := ctx.DeploymentNamespace() + "-other"
_, err = Run("", "kubectl", "create", "namespace", otherNamespace)
if err != nil {
// Namespace might already exist, that's ok
@@ -185,7 +186,8 @@ rules:
func TestCrossNamespaceOwnershipWithRefresh(t *testing.T) {
var clusterRoleUID string

Given(t).
ctx := Given(t)
ctx.
Path("cross-namespace-ownership").
When().
CreateApp().
@@ -215,7 +217,7 @@ metadata:
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]`, DeploymentNamespace(), clusterRoleUID)
verbs: ["get", "list"]`, ctx.DeploymentNamespace(), clusterRoleUID)

_, err := Run("", "sh", "-c", fmt.Sprintf("echo '%s' | kubectl apply -f -", roleYaml))
require.NoError(t, err)

@@ -1,12 +1,10 @@
package e2e

import (
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"

"github.com/argoproj/gitops-engine/pkg/health"
. "github.com/argoproj/gitops-engine/pkg/sync/common"
@@ -23,11 +21,7 @@ import (
func TestCustomToolWithGitCreds(t *testing.T) {
ctx := Given(t)
ctx.
And(func() {
go startCMPServer(t, "./testdata/cmp-gitcreds")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
RunningCMPServer("./testdata/cmp-gitcreds").
CustomCACertAdded().
// add the private repo with credentials
HTTPSRepoURLAdded(true).
@@ -41,7 +35,7 @@ func TestCustomToolWithGitCreds(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
require.NoError(t, err)
assert.Equal(t, "argocd", output)
})
@@ -51,11 +45,7 @@ func TestCustomToolWithGitCreds(t *testing.T) {
func TestCustomToolWithGitCredsTemplate(t *testing.T) {
ctx := Given(t)
ctx.
And(func() {
go startCMPServer(t, "./testdata/cmp-gitcredstemplate")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
RunningCMPServer("./testdata/cmp-gitcredstemplate").
CustomCACertAdded().
// add the git creds template
HTTPSCredentialsUserPassAdded().
@@ -71,17 +61,17 @@ func TestCustomToolWithGitCredsTemplate(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitAskpass}")
require.NoError(t, err)
assert.Equal(t, "argocd", output)
}).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitUsername}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitUsername}")
require.NoError(t, err)
assert.Empty(t, output)
}).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitPassword}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.GitPassword}")
require.NoError(t, err)
assert.Empty(t, output)
})
@@ -92,11 +82,7 @@ func TestCustomToolWithSSHGitCreds(t *testing.T) {
ctx := Given(t)
// path does not matter, we ignore it
ctx.
And(func() {
go startCMPServer(t, "./testdata/cmp-gitsshcreds")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
RunningCMPServer("./testdata/cmp-gitsshcreds").
// add the private repo with ssh credentials
CustomSSHKnownHostsAdded().
SSHRepoURLAdded(true).
@@ -111,12 +97,12 @@ func TestCustomToolWithSSHGitCreds(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", fixture.Name(), "-o", "jsonpath={.metadata.annotations.GitSSHCommand}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.GetName(), "-o", "jsonpath={.metadata.annotations.GitSSHCommand}")
require.NoError(t, err)
assert.Regexp(t, `-i [^ ]+`, output, "test plugin expects $GIT_SSH_COMMAND to contain the option '-i <path to ssh private key>'")
}).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", fixture.Name(), "-o", "jsonpath={.metadata.annotations.GitSSHCredsFileSHA}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.GetName(), "-o", "jsonpath={.metadata.annotations.GitSSHCredsFileSHA}")
require.NoError(t, err)
assert.Regexp(t, `\w+\s+[\/\w]+`, output, "git ssh credentials file should be able to be read, hashing the contents")
})
@@ -126,11 +112,7 @@ func TestCustomToolWithSSHGitCredsDisabled(t *testing.T) {
ctx := Given(t)
// path does not matter, we ignore it
ctx.
And(func() {
go startCMPServer(t, "./testdata/cmp-gitsshcreds-disable-provide")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
RunningCMPServer("./testdata/cmp-gitsshcreds-disable-provide").
CustomCACertAdded().
// add the private repo with ssh credentials
CustomSSHKnownHostsAdded().
@@ -150,11 +132,7 @@ func TestCustomToolWithSSHGitCredsDisabled(t *testing.T) {
func TestCustomToolWithEnv(t *testing.T) {
ctx := Given(t)
ctx.
And(func() {
go startCMPServer(t, "./testdata/cmp-fileName")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
RunningCMPServer("./testdata/cmp-fileName").
// does not matter what the path is
Path("cmp-fileName").
When().
@@ -175,18 +153,18 @@ func TestCustomToolWithEnv(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
require.NoError(t, err)
assert.Equal(t, "baz", output)
}).
And(func(_ *Application) {
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Foo}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Foo}")
require.NoError(t, err)
assert.Equal(t, "bar", output)
}).
And(func(_ *Application) {
expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
require.NoError(t, err)
assert.Equal(t, expectedKubeVersion, output)
}).
@@ -195,7 +173,7 @@ func TestCustomToolWithEnv(t *testing.T) {
expectedAPIVersionSlice := strings.Split(expectedAPIVersion, ",")
sort.Strings(expectedAPIVersionSlice)

output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
require.NoError(t, err)
outputSlice := strings.Split(output, ",")
sort.Strings(outputSlice)
@@ -211,11 +189,7 @@ func TestCustomToolSyncAndDiffLocal(t *testing.T) {
ctx := Given(t)
appPath := filepath.Join(testdataPath, "guestbook")
ctx.
And(func() {
go startCMPServer(t, "./testdata/cmp-kustomize")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
RunningCMPServer("./testdata/cmp-kustomize").
// does not matter what the path is
Path("guestbook").
When().
@@ -233,29 +207,11 @@ func TestCustomToolSyncAndDiffLocal(t *testing.T) {
})
}

func startCMPServer(t *testing.T, configFile string) {
t.Helper()
pluginSockFilePath := fixture.TmpDir + fixture.PluginSockFilePath
t.Setenv("ARGOCD_BINARY_NAME", "argocd-cmp-server")
// ARGOCD_PLUGINSOCKFILEPATH should be set to the same value as the repo server env var
t.Setenv("ARGOCD_PLUGINSOCKFILEPATH", pluginSockFilePath)
|
||||
if _, err := os.Stat(pluginSockFilePath); os.IsNotExist(err) {
|
||||
// path/to/whatever does not exist
|
||||
err := os.Mkdir(pluginSockFilePath, 0o700)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
errors.NewHandler(t).FailOnErr(fixture.RunWithStdin("", "", "../../dist/argocd", "--config-dir-path", configFile))
|
||||
}
|
||||
|
||||
// Discover by fileName
|
||||
func TestCMPDiscoverWithFileName(t *testing.T) {
|
||||
pluginName := "cmp-fileName"
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-fileName")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-fileName").
|
||||
Path(pluginName + "/subdir").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -269,11 +225,7 @@ func TestCMPDiscoverWithFileName(t *testing.T) {
|
||||
// Discover by Find glob
|
||||
func TestCMPDiscoverWithFindGlob(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-glob")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-find-glob").
|
||||
Path("guestbook").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -287,11 +239,7 @@ func TestCMPDiscoverWithFindGlob(t *testing.T) {
|
||||
// Discover by Plugin Name
|
||||
func TestCMPDiscoverWithPluginName(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-glob")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-find-glob").
|
||||
Path("guestbook").
|
||||
When().
|
||||
CreateFromFile(func(app *Application) {
|
||||
@@ -310,11 +258,7 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
pluginName := "cmp-find-command"
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-command")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-find-command").
|
||||
Path(pluginName).
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -324,13 +268,13 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
Expect(SyncStatusIs(SyncStatusCodeSynced)).
|
||||
Expect(HealthIs(health.HealthStatusHealthy)).
|
||||
And(func(_ *Application) {
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.Bar}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "baz", output)
|
||||
}).
|
||||
And(func(_ *Application) {
|
||||
expectedKubeVersion := fixture.GetVersions(t).ServerVersion.Format("%s.%s")
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeVersion}")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedKubeVersion, output)
|
||||
}).
|
||||
@@ -339,7 +283,7 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
expectedAPIVersionSlice := strings.Split(expectedAPIVersion, ",")
|
||||
sort.Strings(expectedAPIVersionSlice)
|
||||
|
||||
output, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
|
||||
output, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", ctx.AppName(), "-o", "jsonpath={.metadata.annotations.KubeApiVersion}")
|
||||
require.NoError(t, err)
|
||||
outputSlice := strings.Split(output, ",")
|
||||
sort.Strings(outputSlice)
|
||||
@@ -349,12 +293,9 @@ func TestCMPDiscoverWithFindCommandWithEnv(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPruneResourceFromCMP(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-find-glob")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
ctx := Given(t)
|
||||
ctx.
|
||||
RunningCMPServer("./testdata/cmp-find-glob").
|
||||
Path("guestbook").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -366,18 +307,14 @@ func TestPruneResourceFromCMP(t *testing.T) {
|
||||
Then().
|
||||
Expect(DoesNotExist()).
|
||||
AndAction(func() {
|
||||
_, err := fixture.Run("", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "deployment", "guestbook-ui")
|
||||
_, err := fixture.Run("", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "deployment", "guestbook-ui")
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPreserveFileModeForCMP(t *testing.T) {
|
||||
Given(t).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata/cmp-preserve-file-mode")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata/cmp-preserve-file-mode").
|
||||
Path("cmp-preserve-file-mode").
|
||||
When().
|
||||
CreateFromFile(func(app *Application) {
|
||||
@@ -393,11 +330,7 @@ func TestPreserveFileModeForCMP(t *testing.T) {
|
||||
|
||||
func TestCMPWithSymlinkPartialFiles(t *testing.T) {
|
||||
Given(t, fixture.WithTestData("testdata2")).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata2/cmp-symlink")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata2/cmp-symlink").
|
||||
Path("guestbook-partial-symlink-files").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -410,11 +343,7 @@ func TestCMPWithSymlinkPartialFiles(t *testing.T) {
|
||||
|
||||
func TestCMPWithSymlinkFiles(t *testing.T) {
|
||||
Given(t, fixture.WithTestData("testdata2")).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata2/cmp-symlink")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata2/cmp-symlink").
|
||||
Path("guestbook-symlink-files").
|
||||
When().
|
||||
CreateApp().
|
||||
@@ -427,11 +356,7 @@ func TestCMPWithSymlinkFiles(t *testing.T) {
|
||||
|
||||
func TestCMPWithSymlinkFolder(t *testing.T) {
|
||||
Given(t, fixture.WithTestData("testdata2")).
|
||||
And(func() {
|
||||
go startCMPServer(t, "./testdata2/cmp-symlink")
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
|
||||
}).
|
||||
RunningCMPServer("./testdata2/cmp-symlink").
|
||||
Path("guestbook-symlink-folder").
|
||||
When().
|
||||
CreateApp().
|
||||
|
||||
@@ -70,7 +70,7 @@ func TestDeploymentWithAnnotationTrackingMode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, out, fmt.Sprintf(`annotations:
|
||||
argocd.argoproj.io/tracking-id: %s:apps/Deployment:%s/nginx-deployment
|
||||
`, ctx.AppName(), DeploymentNamespace()))
|
||||
`, ctx.AppName(), ctx.DeploymentNamespace()))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ func TestDeploymentWithoutTrackingMode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, out, fmt.Sprintf(`annotations:
|
||||
argocd.argoproj.io/tracking-id: %s:apps/Deployment:%s/nginx-deployment
|
||||
`, ctx.AppName(), DeploymentNamespace()))
|
||||
`, ctx.AppName(), ctx.DeploymentNamespace()))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -128,19 +128,20 @@ func TestDeployToKubernetesAPIURLWithQueryParameter(t *testing.T) {
|
||||
// We test with both a cluster-scoped, and a non-cluster scoped, Argo CD Cluster Secret.
|
||||
clusterScopedParam := []bool{false, true}
|
||||
for _, clusterScoped := range clusterScopedParam {
|
||||
EnsureCleanState(t)
|
||||
ctx := Given(t)
|
||||
|
||||
// Simulate two users, each with their own Argo CD cluster secret that can only deploy to their Namespace
|
||||
users := []string{E2ETestPrefix + "user1", E2ETestPrefix + "user2"}
|
||||
users := []string{"user1", "user2"}
|
||||
|
||||
for _, username := range users {
|
||||
createNamespaceScopedUser(t, username, clusterScoped)
|
||||
ns, _, destName := createNamespaceScopedUser(ctx, username, clusterScoped)
|
||||
|
||||
GivenWithSameState(t).
|
||||
GivenWithSameState(ctx).
|
||||
Name("e2e-test-app-"+username).
|
||||
DestName(destName).
|
||||
Path("deployment").
|
||||
When().
|
||||
CreateWithNoNameSpace("--dest-namespace", username).
|
||||
CreateWithNoNameSpace("--dest-namespace", ns).
|
||||
Sync().
|
||||
Then().
|
||||
Expect(OperationPhaseIs(OperationSucceeded)).
|
||||
@@ -159,13 +160,23 @@ func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *
|
||||
clusterScopedParam := []bool{ /*false,*/ true}
|
||||
|
||||
for _, clusterScoped := range clusterScopedParam {
|
||||
EnsureCleanState(t)
|
||||
ctx := Given(t)
|
||||
|
||||
// Simulate two users, each with their own Argo CD cluster secret that can only deploy to their Namespace
|
||||
users := []string{E2ETestPrefix + "user1", E2ETestPrefix + "user2"}
|
||||
users := []string{"user1", "user2"}
|
||||
nsInfo := make(map[string]struct {
|
||||
namespace string
|
||||
serviceAccount string
|
||||
destName string
|
||||
})
|
||||
|
||||
for _, username := range users {
|
||||
createNamespaceScopedUser(t, username, clusterScoped)
|
||||
ns, sa, destName := createNamespaceScopedUser(ctx, username, clusterScoped)
|
||||
nsInfo[username] = struct {
|
||||
namespace string
|
||||
serviceAccount string
|
||||
destName string
|
||||
}{namespace: ns, serviceAccount: sa, destName: destName}
|
||||
}
|
||||
|
||||
for idx, username := range users {
|
||||
@@ -174,21 +185,21 @@ func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *
|
||||
otherUser := users[(idx+1)%len(users)]
|
||||
|
||||
// e.g. Attempt to deploy to user1's namespace, with user2's cluster Secret. This should fail, as user2's cluster Secret does not have the requisite permissions.
|
||||
consequences := GivenWithSameState(t).
|
||||
consequences := GivenWithSameState(ctx).
|
||||
Name("e2e-test-app-"+username).
|
||||
DestName(E2ETestPrefix+"cluster-"+otherUser).
|
||||
DestName(nsInfo[otherUser].destName).
|
||||
Path("deployment").
|
||||
When().
|
||||
CreateWithNoNameSpace("--dest-namespace", username).IgnoreErrors().
|
||||
CreateWithNoNameSpace("--dest-namespace", nsInfo[username].namespace).IgnoreErrors().
|
||||
Sync().Then()
|
||||
|
||||
// The error message differs based on whether the Argo CD Cluster Secret is namespace-scoped or cluster-scoped, but the idea is the same:
|
||||
// - Even when deploying to the same cluster using 2 separate ServiceAccounts, the RBAC of those ServiceAccounts should continue to fully enforce RBAC boundaries.
|
||||
|
||||
if !clusterScoped {
|
||||
consequences.Expect(Condition(ApplicationConditionComparisonError, "Namespace \""+username+"\" for Deployment \"nginx-deployment\" is not managed"))
|
||||
consequences.Expect(Condition(ApplicationConditionComparisonError, "Namespace \""+nsInfo[username].namespace+"\" for Deployment \"nginx-deployment\" is not managed"))
|
||||
} else {
|
||||
consequences.Expect(OperationMessageContains("User \"system:serviceaccount:" + otherUser + ":" + otherUser + "-serviceaccount\" cannot create resource \"deployments\" in API group \"apps\" in the namespace \"" + username + "\""))
|
||||
consequences.Expect(OperationMessageContains("User \"system:serviceaccount:" + nsInfo[otherUser].namespace + ":" + nsInfo[otherUser].serviceAccount + "\" cannot create resource \"deployments\" in API group \"apps\" in the namespace \"" + nsInfo[username].namespace + "\""))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -196,10 +207,13 @@ func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *
|
||||
|
||||
// generateReadOnlyClusterRoleandBindingForServiceAccount creates a ClusterRole/Binding that allows a ServiceAccount in a given namespace to read all resources on a cluster.
|
||||
// - This allows the ServiceAccount to be used within a cluster-scoped Argo CD Cluster Secret
|
||||
func generateReadOnlyClusterRoleandBindingForServiceAccount(roleSuffix string, serviceAccountNS string) (rbacv1.ClusterRole, rbacv1.ClusterRoleBinding) {
|
||||
func generateReadOnlyClusterRoleandBindingForServiceAccount(c *Context, username, serviceAccountName, namespace string) (rbacv1.ClusterRole, rbacv1.ClusterRoleBinding) {
|
||||
clusterRole := rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "read-all-" + roleSuffix,
|
||||
Name: DnsFriendly("read-all-"+username, "-"+c.ShortID()),
|
||||
Labels: map[string]string{
|
||||
TestingLabel: "true",
|
||||
},
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
Verbs: []string{"get", "list", "watch"},
|
||||
@@ -210,12 +224,15 @@ func generateReadOnlyClusterRoleandBindingForServiceAccount(roleSuffix string, s
|
||||
|
||||
clusterRoleBinding := rbacv1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "read-all-" + roleSuffix,
|
||||
Name: DnsFriendly("read-all-"+username, "-"+c.ShortID()),
|
||||
Labels: map[string]string{
|
||||
TestingLabel: "true",
|
||||
},
|
||||
},
|
||||
Subjects: []rbacv1.Subject{{
|
||||
Kind: rbacv1.ServiceAccountKind,
|
||||
Namespace: serviceAccountNS,
|
||||
Name: roleSuffix + "-serviceaccount",
|
||||
Namespace: namespace,
|
||||
Name: serviceAccountName,
|
||||
}},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
@@ -258,26 +275,29 @@ func buildArgoCDClusterSecret(secretName, secretNamespace, clusterName, clusterS
|
||||
// createNamespaceScopedUser
|
||||
// - username = name of Namespace the simulated user is able to deploy to
|
||||
// - clusterScopedSecrets = whether the Service Account is namespace-scoped or cluster-scoped.
|
||||
func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecrets bool) {
|
||||
t.Helper()
|
||||
func createNamespaceScopedUser(c *Context, username string, clusterScopedSecrets bool) (string, string, string) {
|
||||
c.T().Helper()
|
||||
// Create a new Namespace for our simulated user
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: username,
|
||||
Name: DnsFriendly(username, "-"+c.ShortID()),
|
||||
Labels: map[string]string{
|
||||
TestingLabel: "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := KubeClientset.CoreV1().Namespaces().Create(t.Context(), &ns, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err := KubeClientset.CoreV1().Namespaces().Create(c.T().Context(), &ns, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
// Create a ServiceAccount in that Namespace, which will be used for the Argo CD Cluster Secret
serviceAccountName := username + "-serviceaccount"
|
||||
serviceAccountName := DnsFriendly(username, "-sa-"+c.ShortID())
|
||||
err = clusterauth.CreateServiceAccount(KubeClientset, serviceAccountName, ns.Name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
// Create a Role that allows the ServiceAccount to read/write all within the Namespace
|
||||
role := rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "allow-all",
|
||||
Name: DnsFriendly("allow-all", "-"+c.ShortID()),
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
@@ -286,13 +306,13 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
APIGroups: []string{"*"},
|
||||
}},
|
||||
}
|
||||
_, err = KubeClientset.RbacV1().Roles(role.Namespace).Create(t.Context(), &role, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.RbacV1().Roles(role.Namespace).Create(c.T().Context(), &role, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
// Bind the Role with the ServiceAccount in the Namespace
|
||||
roleBinding := rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: E2ETestPrefix + "allow-all-binding",
|
||||
Name: DnsFriendly("allow-all-binding", "-"+c.ShortID()),
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{{
|
||||
@@ -306,32 +326,32 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
Name: role.Name,
|
||||
},
|
||||
}
|
||||
_, err = KubeClientset.RbacV1().RoleBindings(roleBinding.Namespace).Create(t.Context(), &roleBinding, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.RbacV1().RoleBindings(roleBinding.Namespace).Create(c.T().Context(), &roleBinding, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
var token string
|
||||
|
||||
// Attempting to patch the ServiceAccount can intermittently fail with 'failed to patch serviceaccount "(...)" with bearer token secret: Operation cannot be fulfilled on serviceaccounts "(...)": the object has been modified; please apply your changes to the latest version and try again'
|
||||
// We thus keep trying for up to 20 seconds.
|
||||
waitErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 20*time.Second, true, func(context.Context) (done bool, err error) {
|
||||
waitErr := wait.PollUntilContextTimeout(c.T().Context(), 1*time.Second, 20*time.Second, true, func(context.Context) (done bool, err error) {
|
||||
// Retrieve the bearer token from the ServiceAccount
|
||||
token, err = clusterauth.GetServiceAccountBearerToken(KubeClientset, ns.Name, serviceAccountName, time.Second*60)
|
||||
|
||||
// Success is no error and a real token, otherwise keep trying
|
||||
return (err == nil && token != ""), nil
|
||||
})
|
||||
require.NoError(t, waitErr)
|
||||
require.NotEmpty(t, token)
|
||||
require.NoError(c.T(), waitErr)
|
||||
require.NotEmpty(c.T(), token)
|
||||
|
||||
// In order to test a cluster-scoped Argo CD Cluster Secret, we may optionally grant the ServiceAccount read-all permissions at cluster scope.
|
||||
if clusterScopedSecrets {
|
||||
clusterRole, clusterRoleBinding := generateReadOnlyClusterRoleandBindingForServiceAccount(username, username)
|
||||
clusterRole, clusterRoleBinding := generateReadOnlyClusterRoleandBindingForServiceAccount(c, username, serviceAccountName, ns.Name)
|
||||
|
||||
_, err := KubeClientset.RbacV1().ClusterRoles().Create(t.Context(), &clusterRole, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err := KubeClientset.RbacV1().ClusterRoles().Create(c.T().Context(), &clusterRole, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
_, err = KubeClientset.RbacV1().ClusterRoleBindings().Create(t.Context(), &clusterRoleBinding, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.RbacV1().ClusterRoleBindings().Create(c.T().Context(), &clusterRoleBinding, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
}
|
||||
|
||||
// Build the Argo CD Cluster Secret by using the service account token, and extracting needed values from kube config
|
||||
@@ -343,10 +363,10 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
}
|
||||
|
||||
jsonStringBytes, err := json.Marshal(clusterSecretConfigJSON)
|
||||
require.NoError(t, err)
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
_, apiURL, err := extractKubeConfigValues()
|
||||
require.NoError(t, err)
|
||||
require.NoError(c.T(), err)
|
||||
|
||||
clusterResourcesField := ""
|
||||
namespacesField := ""
|
||||
@@ -358,13 +378,14 @@ func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecre
|
||||
|
||||
// We create an Argo CD cluster Secret declaratively, using the K8s client, rather than via CLI, as the CLI doesn't currently
|
||||
// support Kubernetes API server URLs with query parameters.
|
||||
|
||||
secret := buildArgoCDClusterSecret("test-"+username, ArgoCDNamespace, E2ETestPrefix+"cluster-"+username, apiURL+"?user="+username,
|
||||
clusterName := DnsFriendly("test-"+username, "-"+c.ShortID())
|
||||
secret := buildArgoCDClusterSecret(clusterName, ArgoCDNamespace, clusterName, apiURL+"?user="+username,
|
||||
string(jsonStringBytes), clusterResourcesField, namespacesField)
|
||||
|
||||
// Finally, create the Cluster secret in the Argo CD E2E namespace
|
||||
_, err = KubeClientset.CoreV1().Secrets(secret.Namespace).Create(t.Context(), &secret, metav1.CreateOptions{})
|
||||
require.NoError(t, err)
|
||||
_, err = KubeClientset.CoreV1().Secrets(secret.Namespace).Create(c.T().Context(), &secret, metav1.CreateOptions{})
|
||||
require.NoError(c.T(), err)
|
||||
return ns.Name, serviceAccountName, clusterName
|
||||
}
|
||||
|
||||
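The unique-name scheme above relies on a DnsFriendly helper and the context's ShortID, neither of which is defined in this diff. A minimal sketch of how such a helper could behave, assuming Kubernetes DNS-1123 label rules (the truncation limit and character handling are assumptions, not part of this change; requires "regexp" and "strings" imports):

var invalidDNSChars = regexp.MustCompile(`[^a-z0-9-]+`)

// DnsFriendly lowercases the base name, replaces characters that are not valid
// in a DNS-1123 label, and truncates so the unique suffix still fits within the
// 63-character label limit before appending it.
func DnsFriendly(base, suffix string) string {
	name := invalidDNSChars.ReplaceAllString(strings.ToLower(base), "-")
	const maxLen = 63
	if cut := maxLen - len(suffix); cut >= 0 && len(name) > cut {
		name = name[:cut]
	}
	return strings.Trim(name, "-") + suffix
}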
// extractKubeConfigValues returns contents of the local environment's kubeconfig, using standard path resolution mechanism.

@@ -1,4 +1,4 @@
package project
package account

import (
"time"
@@ -19,59 +19,59 @@ type Actions struct {
}

func (a *Actions) prepareCanIGetLogsArgs() []string {
a.context.t.Helper()
a.context.T().Helper()
return []string{
"account", "can-i", "get", "logs", a.context.project + "/*",
}
}

func (a *Actions) CanIGetLogs() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli(a.prepareCanIGetLogsArgs()...)
return a
}

func (a *Actions) prepareSetPasswordArgs(account string) []string {
a.context.t.Helper()
a.context.T().Helper()
return []string{
"account", "update-password", "--account", account, "--current-password", fixture.AdminPassword, "--new-password", fixture.DefaultTestUserPassword,
}
}

func (a *Actions) Create() *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetAccounts(map[string][]string{
a.context.name: {"login"},
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetAccounts(map[string][]string{
a.context.GetName(): {"login"},
}))
_, _ = fixture.RunCli(a.prepareSetPasswordArgs(a.context.name)...)
_, _ = fixture.RunCli(a.prepareSetPasswordArgs(a.context.GetName())...)
return a
}

func (a *Actions) SetPermissions(permissions []fixture.ACL, roleName string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetPermissions(permissions, a.context.name, roleName))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetPermissions(permissions, a.context.GetName(), roleName))
return a
}

func (a *Actions) SetParamInSettingConfigMap(key, value string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetParamInSettingConfigMap(key, value))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetParamInSettingConfigMap(key, value))
return a
}

func (a *Actions) Login() *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.LoginAs(a.context.name))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.LoginAs(a.context.GetName()))
return a
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = fixture.RunCli(args...)
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
time.Sleep(fixture.WhenThenSleepInterval)
return &Consequences{a.context, a}
}

@@ -1,4 +1,4 @@
package project
package account

import (
"context"
@@ -21,19 +21,19 @@ type Consequences struct {
}

func (c *Consequences) And(block func(account *account.Account, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.get())
return c
}

func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.actions.lastOutput, c.actions.lastError)
return c
}

func (c *Consequences) CurrentUser(block func(user *session.GetUserInfoResponse, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.getCurrentUser())
return c
}
@@ -45,7 +45,7 @@ func (c *Consequences) get() (*account.Account, error) {
return nil, err
}
for _, acc := range accList.Items {
if acc.Name == c.context.name {
if acc.Name == c.context.GetName() {
return acc, nil
}
}
@@ -53,9 +53,9 @@ func (c *Consequences) get() (*account.Account, error) {
}

func (c *Consequences) getCurrentUser() (*session.GetUserInfoResponse, error) {
c.context.t.Helper()
c.context.T().Helper()
closer, client, err := fixture.ArgoCDClientset.NewSessionClient()
require.NoError(c.context.t, err)
require.NoError(c.context.T(), err)
defer utilio.Close(closer)
return client.GetUserInfo(context.Background(), &session.GetUserInfoRequest{})
}

@@ -1,4 +1,4 @@
package project
package account

import (
"testing"
@@ -7,18 +7,25 @@ import (
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

// this implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
// seconds
name string
*fixture.TestState

project string
}

func Given(t *testing.T) *Context {
t.Helper()
fixture.EnsureCleanState(t)
return &Context{t: t, name: fixture.Name()}
state := fixture.EnsureCleanState(t)
return &Context{TestState: state}
}

// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
return &Context{TestState: fixture.NewTestStateFromContext(ctx)}
}

func (c *Context) Project(project string) *Context {
@@ -26,12 +33,8 @@ func (c *Context) Project(project string) *Context {
return c
}

func (c *Context) GetName() string {
return c.name
}

func (c *Context) Name(name string) *Context {
c.name = name
c.SetName(name)
return c
}

@@ -17,43 +17,43 @@ type Actions struct {
}

func (a *Actions) prepareExportCommand() []string {
a.context.t.Helper()
a.context.T().Helper()
args := []string{"export", "--application-namespaces", fixture.AppNamespace()}

return args
}

func (a *Actions) prepareImportCommand() []string {
a.context.t.Helper()
a.context.T().Helper()
args := []string{"import", "--application-namespaces", fixture.AppNamespace(), "-"}

return args
}

func (a *Actions) RunExport() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli(a.prepareExportCommand()...)
return a
}

func (a *Actions) RunImport(stdin string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCliWithStdin(stdin, a.prepareImportCommand()...)
return a
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = RunCli(args...)
}

func (a *Actions) runCliWithStdin(stdin string, args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = RunCliWithStdin(stdin, args...)
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
time.Sleep(fixture.WhenThenSleepInterval)
return &Consequences{a.context, a}
}

@@ -14,13 +14,13 @@ type Consequences struct {
}

func (c *Consequences) And(block func()) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block()
return c
}

func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.actions.lastOutput, c.actions.lastError)
return c
}

@@ -7,20 +7,23 @@ import (
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

// this implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
*fixture.TestState
}

func Given(t *testing.T) *Context {
t.Helper()
fixture.EnsureCleanState(t)
return GivenWithSameState(t)
state := fixture.EnsureCleanState(t)
return &Context{TestState: state}
}

func GivenWithSameState(t *testing.T) *Context {
t.Helper()
return &Context{t}
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
return &Context{TestState: fixture.NewTestStateFromContext(ctx)}
}

func (c *Context) And(block func()) *Context {

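GivenWithSameState exists so that one test can drive several fixture packages against a single clean state. A minimal sketch of the intended call pattern, assuming the app fixture's Context satisfies fixture.TestContext (the package aliases and test body are illustrative, not from this change):

func TestWithTwoContexts(t *testing.T) {
	appCtx := app.Given(t)                       // EnsureCleanState runs once and produces the TestState
	adminCtx := admin.GivenWithSameState(appCtx) // second fixture context reuses the same state
	_ = adminCtx
	// ...drive both fixtures against the same test state...
}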
@@ -1,12 +1,15 @@
package app

import (
"context"
"encoding/json"
"fmt"
"os"
"slices"
"strconv"
"time"

corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"

log "github.com/sirupsen/logrus"
@@ -41,71 +44,71 @@ func (a *Actions) DoNotIgnoreErrors() *Actions {
}

func (a *Actions) PatchFile(file string, jsonPatch string) *Actions {
a.context.t.Helper()
fixture.Patch(a.context.t, a.context.path+"/"+file, jsonPatch)
a.context.T().Helper()
fixture.Patch(a.context.T(), a.context.path+"/"+file, jsonPatch)
return a
}

func (a *Actions) DeleteFile(file string) *Actions {
a.context.t.Helper()
fixture.Delete(a.context.t, a.context.path+"/"+file)
a.context.T().Helper()
fixture.Delete(a.context.T(), a.context.path+"/"+file)
return a
}

func (a *Actions) WriteFile(fileName, fileContents string) *Actions {
a.context.t.Helper()
fixture.WriteFile(a.context.t, a.context.path+"/"+fileName, fileContents)
a.context.T().Helper()
fixture.WriteFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
return a
}

func (a *Actions) AddFile(fileName, fileContents string) *Actions {
a.context.t.Helper()
fixture.AddFile(a.context.t, a.context.path+"/"+fileName, fileContents)
a.context.T().Helper()
fixture.AddFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
return a
}

func (a *Actions) AddSignedFile(fileName, fileContents string) *Actions {
a.context.t.Helper()
fixture.AddSignedFile(a.context.t, a.context.path+"/"+fileName, fileContents)
a.context.T().Helper()
fixture.AddSignedFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
return a
}

func (a *Actions) AddSignedTag(name string) *Actions {
a.context.t.Helper()
fixture.AddSignedTag(a.context.t, name)
a.context.T().Helper()
fixture.AddSignedTag(a.context.T(), name)
return a
}

func (a *Actions) AddTag(name string) *Actions {
a.context.t.Helper()
fixture.AddTag(a.context.t, name)
a.context.T().Helper()
fixture.AddTag(a.context.T(), name)
return a
}

func (a *Actions) AddAnnotatedTag(name string, message string) *Actions {
a.context.t.Helper()
fixture.AddAnnotatedTag(a.context.t, name, message)
a.context.T().Helper()
fixture.AddAnnotatedTag(a.context.T(), name, message)
return a
}

func (a *Actions) AddTagWithForce(name string) *Actions {
a.context.t.Helper()
fixture.AddTagWithForce(a.context.t, name)
a.context.T().Helper()
fixture.AddTagWithForce(a.context.T(), name)
return a
}

func (a *Actions) RemoveSubmodule() *Actions {
a.context.t.Helper()
fixture.RemoveSubmodule(a.context.t)
a.context.T().Helper()
fixture.RemoveSubmodule(a.context.T())
return a
}

func (a *Actions) CreateFromPartialFile(data string, flags ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
tmpFile, err := os.CreateTemp("", "")
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)
_, err = tmpFile.WriteString(data)
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)

args := append([]string{
"app", "create",
@@ -113,7 +116,7 @@ func (a *Actions) CreateFromPartialFile(data string, flags ...string) *Actions {
"--name", a.context.AppName(),
"--repo", fixture.RepoURL(a.context.repoURLType),
"--dest-server", a.context.destServer,
"--dest-namespace", fixture.DeploymentNamespace(),
"--dest-namespace", a.context.DeploymentNamespace(),
}, flags...)
if a.context.appNamespace != "" {
args = append(args, "--app-namespace", a.context.appNamespace)
@@ -124,7 +127,7 @@ func (a *Actions) CreateFromPartialFile(data string, flags ...string) *Actions {
}

func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
app := &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: a.context.AppName(),
@@ -138,7 +141,7 @@ func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags
},
Destination: v1alpha1.ApplicationDestination{
Server: a.context.destServer,
Namespace: fixture.DeploymentNamespace(),
Namespace: a.context.DeploymentNamespace(),
},
},
}
@@ -167,9 +170,9 @@ func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags
handler(app)
data := grpc.MustMarshal(app)
tmpFile, err := os.CreateTemp("", "")
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)
_, err = tmpFile.Write(data)
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)

args := append([]string{
"app", "create",
@@ -181,7 +184,7 @@ func (a *Actions) CreateFromFile(handler func(app *v1alpha1.Application), flags
}

func (a *Actions) CreateMultiSourceAppFromFile(flags ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
app := &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: a.context.AppName(),
@@ -192,7 +195,7 @@ func (a *Actions) CreateMultiSourceAppFromFile(flags ...string) *Actions {
Sources: a.context.sources,
Destination: v1alpha1.ApplicationDestination{
Server: a.context.destServer,
Namespace: fixture.DeploymentNamespace(),
Namespace: a.context.DeploymentNamespace(),
},
SyncPolicy: &v1alpha1.SyncPolicy{
Automated: &v1alpha1.SyncPolicyAutomated{
@@ -204,9 +207,9 @@ func (a *Actions) CreateMultiSourceAppFromFile(flags ...string) *Actions {

data := grpc.MustMarshal(app)
tmpFile, err := os.CreateTemp("", "")
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)
_, err = tmpFile.Write(data)
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)

args := append([]string{
"app", "create",
@@ -226,7 +229,7 @@ func (a *Actions) CreateWithNoNameSpace(args ...string) *Actions {

func (a *Actions) CreateApp(args ...string) *Actions {
args = a.prepareCreateAppArgs(args)
args = append(args, "--dest-namespace", fixture.DeploymentNamespace())
args = append(args, "--dest-namespace", a.context.DeploymentNamespace())

// are you adding new context values? if you only use them for this func, then use args instead
a.runCli(args...)
@@ -235,7 +238,7 @@ func (a *Actions) CreateApp(args ...string) *Actions {
}

func (a *Actions) prepareCreateAppArgs(args []string) []string {
a.context.t.Helper()
a.context.T().Helper()
args = append([]string{
"app", "create", a.context.AppQualifiedName(),
}, args...)
@@ -326,33 +329,33 @@ func (a *Actions) prepareCreateAppArgs(args []string) []string {
}

func (a *Actions) Declarative(filename string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
return a.DeclarativeWithCustomRepo(filename, fixture.RepoURL(a.context.repoURLType))
}

func (a *Actions) DeclarativeWithCustomRepo(filename string, repoURL string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
values := map[string]any{
"ArgoCDNamespace": fixture.TestNamespace(),
"DeploymentNamespace": fixture.DeploymentNamespace(),
"DeploymentNamespace": a.context.DeploymentNamespace(),
"Name": a.context.AppName(),
"Path": a.context.path,
"Project": a.context.project,
"RepoURL": repoURL,
}
a.lastOutput, a.lastError = fixture.Declarative(a.context.t, filename, values)
a.lastOutput, a.lastError = fixture.Declarative(a.context.T(), filename, values)
a.verifyAction()
return a
}

func (a *Actions) PatchApp(patch string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("app", "patch", a.context.AppQualifiedName(), "--patch", patch)
return a
}

func (a *Actions) PatchAppHttp(patch string) *Actions { //nolint:revive //FIXME(var-naming)
a.context.t.Helper()
a.context.T().Helper()
var application v1alpha1.Application
patchType := "merge"
appName := a.context.AppQualifiedName()
@@ -364,17 +367,17 @@ func (a *Actions) PatchAppHttp(patch string) *Actions { //nolint:revive //FIXME(
AppNamespace: &appNamespace,
}
jsonBytes, err := json.MarshalIndent(patchRequest, "", " ")
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)
err = fixture.DoHttpJsonRequest("PATCH",
fmt.Sprintf("/api/v1/applications/%v", appName),
&application,
jsonBytes...)
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)
return a
}

func (a *Actions) AppSet(flags ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
args := []string{"app", "set", a.context.AppQualifiedName()}
args = append(args, flags...)
a.runCli(args...)
@@ -382,7 +385,7 @@ func (a *Actions) AppSet(flags ...string) *Actions {
}

func (a *Actions) AppUnSet(flags ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
args := []string{"app", "unset", a.context.AppQualifiedName()}
args = append(args, flags...)
a.runCli(args...)
@@ -390,9 +393,9 @@ func (a *Actions) AppUnSet(flags ...string) *Actions {
}

func (a *Actions) Sync(args ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
args = append([]string{"app", "sync"}, args...)
if a.context.name != "" {
if a.context.GetName() != "" {
args = append(args, a.context.AppQualifiedName())
}
args = append(args, "--timeout", strconv.Itoa(a.context.timeout))
@@ -436,21 +439,25 @@ func (a *Actions) Sync(args ...string) *Actions {
}

func (a *Actions) ConfirmDeletion() *Actions {
a.context.t.Helper()
a.context.T().Helper()

a.runCli("app", "confirm-deletion", a.context.AppQualifiedName())

// Always sleep more than a second after the confirmation so the timestamp
// is not valid for immediate subsequent operations
time.Sleep(1500 * time.Millisecond)

return a
}

func (a *Actions) TerminateOp() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("app", "terminate-op", a.context.AppQualifiedName())
return a
}

func (a *Actions) Refresh(refreshType v1alpha1.RefreshType) *Actions {
a.context.t.Helper()
a.context.T().Helper()
flag := map[v1alpha1.RefreshType]string{
v1alpha1.RefreshTypeNormal: "--refresh",
v1alpha1.RefreshTypeHard: "--hard-refresh",
@@ -462,33 +469,33 @@ func (a *Actions) Refresh(refreshType v1alpha1.RefreshType) *Actions {
}

func (a *Actions) Get() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("app", "get", a.context.AppQualifiedName())
return a
}

func (a *Actions) Delete(cascade bool) *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("app", "delete", a.context.AppQualifiedName(), fmt.Sprintf("--cascade=%v", cascade), "--yes")
return a
}

func (a *Actions) DeleteBySelector(selector string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("app", "delete", "--selector="+selector, "--yes")
return a
}

func (a *Actions) DeleteBySelectorWithWait(selector string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("app", "delete", "--selector="+selector, "--yes", "--wait")
return a
}

func (a *Actions) Wait(args ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
args = append([]string{"app", "wait"}, args...)
if a.context.name != "" {
if a.context.GetName() != "" {
args = append(args, a.context.AppQualifiedName())
}
args = append(args, "--timeout", strconv.Itoa(a.context.timeout))
@@ -497,65 +504,111 @@ func (a *Actions) Wait(args ...string) *Actions {
}

func (a *Actions) SetParamInSettingConfigMap(key, value string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetParamInSettingConfigMap(key, value))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetParamInSettingConfigMap(key, value))
return a
}

func (a *Actions) And(block func()) *Actions {
a.context.t.Helper()
a.context.T().Helper()
block()
return a
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
return &Consequences{a.context, a, 15}
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = fixture.RunCli(args...)
a.verifyAction()
}

func (a *Actions) verifyAction() {
a.context.t.Helper()
a.context.T().Helper()
if !a.ignoreErrors {
a.Then().Expect(Success(""))
}
}

func (a *Actions) SetTrackingMethod(trackingMethod string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetTrackingMethod(trackingMethod))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetTrackingMethod(trackingMethod))
return a
}

func (a *Actions) SetInstallationID(installationID string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetInstallationID(installationID))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetInstallationID(installationID))
return a
}

func (a *Actions) SetTrackingLabel(trackingLabel string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetTrackingLabel(trackingLabel))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetTrackingLabel(trackingLabel))
return a
}

func (a *Actions) WithImpersonationEnabled(serviceAccountName string, policyRules []rbacv1.PolicyRule) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetImpersonationEnabled("true"))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetImpersonationEnabled("true"))
if serviceAccountName == "" || policyRules == nil {
return a
}
require.NoError(a.context.t, fixture.CreateRBACResourcesForImpersonation(serviceAccountName, policyRules))
require.NoError(a.context.T(), createRBACResourcesForImpersonation(a.context.DeploymentNamespace(), serviceAccountName, policyRules))
return a
}

func (a *Actions) WithImpersonationDisabled() *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetImpersonationEnabled("false"))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetImpersonationEnabled("false"))
return a
}

// TODO: Ensure service account name and other resources have unique names based on the test context
// TODO: This function should be moved to the project context since impersonation is a project concept, not application.
func createRBACResourcesForImpersonation(namespace string, serviceAccountName string, policyRules []rbacv1.PolicyRule) error {
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
_, err := fixture.KubeClientset.CoreV1().ServiceAccounts(namespace).Create(context.Background(), sa, metav1.CreateOptions{})
if err != nil {
return err
}
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", serviceAccountName, "role"),
},
Rules: policyRules,
}
_, err = fixture.KubeClientset.RbacV1().Roles(namespace).Create(context.Background(), role, metav1.CreateOptions{})
if err != nil {
return err
}
rolebinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", serviceAccountName, "rolebinding"),
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: fmt.Sprintf("%s-%s", serviceAccountName, "role"),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err = fixture.KubeClientset.RbacV1().RoleBindings(namespace).Create(context.Background(), rolebinding, metav1.CreateOptions{})
if err != nil {
return err
}
return nil
}

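As a hypothetical illustration of the impersonation helpers above (the service account name, policy rules, and expectation are assumptions, not part of this change), a caller might chain them like this:

func TestSyncWithImpersonation(t *testing.T) {
	Given(t).
		Path("guestbook").
		When().
		// Enable impersonation and create the ServiceAccount plus Role/RoleBinding
		WithImpersonationEnabled("deployer-sa", []rbacv1.PolicyRule{{
			APIGroups: []string{"*"},
			Resources: []string{"*"},
			Verbs:     []string{"*"},
		}}).
		CreateApp().
		Sync().
		Then().
		Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced))
}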
@@ -25,7 +25,7 @@ type Consequences struct {

func (c *Consequences) Expect(e Expectation) *Consequences {
// this invocation makes sure this func is not reported as the cause of the failure - we are a "test helper"
c.context.t.Helper()
c.context.T().Helper()
var message string
var state state
sleepIntervals := []time.Duration{
@@ -50,19 +50,19 @@ func (c *Consequences) Expect(e Expectation) *Consequences {
log.Infof("expectation succeeded: %s", message)
return c
case failed:
c.context.t.Fatalf("failed expectation: %s", message)
c.context.T().Fatalf("failed expectation: %s", message)
return c
}
log.Infof("pending: %s", message)
}
c.context.t.Fatal("timeout waiting for: " + message)
c.context.T().Fatal("timeout waiting for: " + message)
return c
}

// ExpectConsistently will continuously evaluate a condition, and it must be true each time it is evaluated, otherwise the test is failed. The condition will be repeatedly evaluated until 'expirationDuration' is met, waiting 'waitDuration' after each success.
func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Duration, expirationDuration time.Duration) *Consequences {
// this invocation makes sure this func is not reported as the cause of the failure - we are a "test helper"
c.context.t.Helper()
c.context.T().Helper()

expiration := time.Now().Add(expirationDuration)
for time.Now().Before(expiration) {
@@ -71,7 +71,7 @@ func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Durat
case succeeded:
log.Infof("expectation succeeded: %s", message)
case failed:
c.context.t.Fatalf("failed expectation: %s", message)
c.context.T().Fatalf("failed expectation: %s", message)
return c
}

@@ -85,13 +85,13 @@ func (c *Consequences) ExpectConsistently(e Expectation, waitDuration time.Durat
}

func (c *Consequences) And(block func(app *v1alpha1.Application)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.app())
return c
}

func (c *Consequences) AndAction(block func()) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block()
return c
}
@@ -106,9 +106,9 @@ func (c *Consequences) When() *Actions {
}

func (c *Consequences) app() *v1alpha1.Application {
c.context.t.Helper()
c.context.T().Helper()
app, err := c.get()
require.NoError(c.context.t, err)
require.NoError(c.context.T(), err)
return app
}

@@ -117,16 +117,16 @@ func (c *Consequences) get() (*v1alpha1.Application, error) {
}

func (c *Consequences) resource(kind, name, namespace string) v1alpha1.ResourceStatus {
c.context.t.Helper()
c.context.T().Helper()
closer, client, err := fixture.ArgoCDClientset.NewApplicationClient()
require.NoError(c.context.t, err)
require.NoError(c.context.T(), err)
defer utilio.Close(closer)
app, err := client.Get(context.Background(), &applicationpkg.ApplicationQuery{
Name: ptr.To(c.context.AppName()),
Projects: []string{c.context.project},
AppNamespace: ptr.To(c.context.appNamespace),
})
require.NoError(c.context.t, err)
require.NoError(c.context.T(), err)
for _, r := range app.Status.Resources {
if r.Kind == kind && r.Name == name && (namespace == "" || namespace == r.Namespace) {
return r
@@ -141,7 +141,7 @@ func (c *Consequences) resource(kind, name, namespace string) v1alpha1.ResourceS
}

func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.actions.lastOutput, c.actions.lastError)
return c
}

@@ -1,6 +1,7 @@
package app

import (
"strings"
"testing"
"time"

@@ -15,17 +16,18 @@ import (
"github.com/argoproj/argo-cd/v3/util/settings"
)

// Context implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
*fixture.TestState
path string
chart string
ociRegistry string
ociRegistryPath string
repoURLType fixture.RepoURLType
// seconds
timeout int
name string
timeout int

appNamespace string
destServer string
destName string
@@ -64,8 +66,8 @@ type ContextArgs struct {

func Given(t *testing.T, opts ...fixture.TestOption) *Context {
t.Helper()
fixture.EnsureCleanState(t, opts...)
return GivenWithSameState(t)
state := fixture.EnsureCleanState(t, opts...)
return GivenWithSameState(state)
}

func GivenWithNamespace(t *testing.T, namespace string) *Context {
@@ -75,17 +77,18 @@ func GivenWithNamespace(t *testing.T, namespace string) *Context {
return ctx
}

func GivenWithSameState(t *testing.T) *Context {
t.Helper()
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
// ARGOCD_E2E_DEFAULT_TIMEOUT can be used to override the default timeout
// for any context.
timeout := env.ParseNumFromEnv("ARGOCD_E2E_DEFAULT_TIMEOUT", 20, 0, 180)
return &Context{
t: t,
TestState: fixture.NewTestStateFromContext(ctx),
destServer: v1alpha1.KubernetesInternalAPIServerAddr,
destName: "in-cluster",
repoURLType: fixture.RepoURLTypeFile,
name: fixture.Name(),
timeout: timeout,
project: "default",
prune: true,
@@ -93,8 +96,16 @@ func GivenWithSameState(t *testing.T) *Context {
}
}

func (c *Context) Name(name string) *Context {
c.SetName(name)
return c
}

// AppName returns the unique application name for the test context.
// Unique application names protect from potential conflicts between test runs
// caused by the tracking annotation on existing objects
func (c *Context) AppName() string {
return c.name
return c.GetName()
}

func (c *Context) AppQualifiedName() string {
@@ -118,129 +129,134 @@ func (c *Context) SetAppNamespace(namespace string) *Context {
}

func (c *Context) GPGPublicKeyAdded() *Context {
gpgkeys.AddGPGPublicKey(c.t)
gpgkeys.AddGPGPublicKey(c.T())
return c
}

func (c *Context) GPGPublicKeyRemoved() *Context {
gpgkeys.DeleteGPGPublicKey(c.t)
gpgkeys.DeleteGPGPublicKey(c.T())
return c
}

func (c *Context) CustomCACertAdded() *Context {
certs.AddCustomCACert(c.t)
certs.AddCustomCACert(c.T())
return c
}

func (c *Context) CustomSSHKnownHostsAdded() *Context {
certs.AddCustomSSHKnownHostsKeys(c.t)
certs.AddCustomSSHKnownHostsKeys(c.T())
return c
}

func (c *Context) HTTPSRepoURLAdded(withCreds bool, opts ...repos.AddRepoOpts) *Context {
repos.AddHTTPSRepo(c.t, false, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
repos.AddHTTPSRepo(c.T(), false, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
return c
}

func (c *Context) HTTPSInsecureRepoURLAdded(withCreds bool, opts ...repos.AddRepoOpts) *Context {
repos.AddHTTPSRepo(c.t, true, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
repos.AddHTTPSRepo(c.T(), true, withCreds, "", fixture.RepoURLTypeHTTPS, opts...)
return c
}

func (c *Context) HTTPSInsecureRepoURLWithClientCertAdded() *Context {
repos.AddHTTPSRepoClientCert(c.t, true)
repos.AddHTTPSRepoClientCert(c.T(), true)
return c
}

func (c *Context) HTTPSRepoURLWithClientCertAdded() *Context {
repos.AddHTTPSRepoClientCert(c.t, false)
repos.AddHTTPSRepoClientCert(c.T(), false)
return c
}

func (c *Context) SubmoduleHTTPSRepoURLAdded(withCreds bool) *Context {
fixture.CreateSubmoduleRepos(c.t, "https")
repos.AddHTTPSRepo(c.t, false, withCreds, "", fixture.RepoURLTypeHTTPSSubmoduleParent)
fixture.CreateSubmoduleRepos(c.T(), "https")
repos.AddHTTPSRepo(c.T(), false, withCreds, "", fixture.RepoURLTypeHTTPSSubmoduleParent)
return c
}

func (c *Context) WriteCredentials(insecure bool) *Context {
repos.AddWriteCredentials(c.T(), c.GetName(), insecure, c.repoURLType)
return c
}

func (c *Context) SSHRepoURLAdded(withCreds bool) *Context {
repos.AddSSHRepo(c.t, false, withCreds, fixture.RepoURLTypeSSH)
repos.AddSSHRepo(c.T(), false, withCreds, fixture.RepoURLTypeSSH)
return c
}

func (c *Context) SSHInsecureRepoURLAdded(withCreds bool) *Context {
repos.AddSSHRepo(c.t, true, withCreds, fixture.RepoURLTypeSSH)
repos.AddSSHRepo(c.T(), true, withCreds, fixture.RepoURLTypeSSH)
return c
}

func (c *Context) SubmoduleSSHRepoURLAdded(withCreds bool) *Context {
fixture.CreateSubmoduleRepos(c.t, "ssh")
repos.AddSSHRepo(c.t, false, withCreds, fixture.RepoURLTypeSSHSubmoduleParent)
fixture.CreateSubmoduleRepos(c.T(), "ssh")
repos.AddSSHRepo(c.T(), false, withCreds, fixture.RepoURLTypeSSHSubmoduleParent)
return c
}

func (c *Context) HelmRepoAdded(name string) *Context {
repos.AddHelmRepo(c.t, name)
repos.AddHelmRepo(c.T(), name)
return c
}

func (c *Context) HelmOCIRepoAdded(name string) *Context {
repos.AddHelmOCIRepo(c.t, name)
repos.AddHelmOCIRepo(c.T(), name)
return c
}

func (c *Context) PushImageToOCIRegistry(pathName, tag string) *Context {
repos.PushImageToOCIRegistry(c.t, pathName, tag)
repos.PushImageToOCIRegistry(c.T(), pathName, tag)
return c
}

func (c *Context) PushImageToAuthenticatedOCIRegistry(pathName, tag string) *Context {
repos.PushImageToAuthenticatedOCIRegistry(c.t, pathName, tag)
repos.PushImageToAuthenticatedOCIRegistry(c.T(), pathName, tag)
return c
}

func (c *Context) PushChartToOCIRegistry(chartPathName, chartName, chartVersion string) *Context {
repos.PushChartToOCIRegistry(c.t, chartPathName, chartName, chartVersion)
repos.PushChartToOCIRegistry(c.T(), chartPathName, chartName, chartVersion)
return c
}

func (c *Context) PushChartToAuthenticatedOCIRegistry(chartPathName, chartName, chartVersion string) *Context {
repos.PushChartToAuthenticatedOCIRegistry(c.t, chartPathName, chartName, chartVersion)
repos.PushChartToAuthenticatedOCIRegistry(c.T(), chartPathName, chartName, chartVersion)
return c
}

func (c *Context) HTTPSCredentialsUserPassAdded() *Context {
repos.AddHTTPSCredentialsUserPass(c.t)
repos.AddHTTPSCredentialsUserPass(c.T())
return c
}

func (c *Context) HelmHTTPSCredentialsUserPassAdded() *Context {
repos.AddHelmHTTPSCredentialsTLSClientCert(c.t)
repos.AddHelmHTTPSCredentialsTLSClientCert(c.T())
return c
}

func (c *Context) HelmoOCICredentialsWithoutUserPassAdded() *Context {
repos.AddHelmoOCICredentialsWithoutUserPass(c.t)
repos.AddHelmoOCICredentialsWithoutUserPass(c.T())
return c
}

func (c *Context) HTTPSCredentialsTLSClientCertAdded() *Context {
repos.AddHTTPSCredentialsTLSClientCert(c.t)
repos.AddHTTPSCredentialsTLSClientCert(c.T())
return c
}

func (c *Context) SSHCredentialsAdded() *Context {
repos.AddSSHCredentials(c.t)
repos.AddSSHCredentials(c.T())
return c
}

func (c *Context) OCIRepoAdded(name, imagePath string) *Context {
repos.AddOCIRepo(c.t, name, imagePath)
repos.AddOCIRepo(c.T(), name, imagePath)
return c
}

func (c *Context) AuthenticatedOCIRepoAdded(name, imagePath string) *Context {
repos.AddAuthenticatedOCIRepo(c.t, name, imagePath)
repos.AddAuthenticatedOCIRepo(c.T(), name, imagePath)
return c
}

@@ -250,8 +266,8 @@ func (c *Context) OCIRegistry(registry string) *Context {
}

func (c *Context) ProjectSpec(spec v1alpha1.AppProjectSpec) *Context {
c.t.Helper()
require.NoError(c.t, fixture.SetProjectSpec(c.project, spec))
c.T().Helper()
require.NoError(c.T(), fixture.SetProjectSpec(c.project, spec))
return c
}

@@ -265,15 +281,6 @@ func (c *Context) RepoURLType(urlType fixture.RepoURLType) *Context {
return c
}

func (c *Context) GetName() string {
return c.name
}

func (c *Context) Name(name string) *Context {
c.name = name
return c
}

func (c *Context) Path(path string) *Context {
c.path = path
return c
@@ -336,6 +343,10 @@ func (c *Context) DestServer(destServer string) *Context {
}

func (c *Context) DestName(destName string) *Context {
if destName != "in-cluster" {
suffix := "-" + c.ShortID()
destName = fixture.DnsFriendly(strings.TrimSuffix(destName, suffix), suffix)
}
c.destName = destName
c.isDestServerInferred = true
return c
@@ -368,14 +379,14 @@ func (c *Context) NameSuffix(nameSuffix string) *Context {
}

func (c *Context) ResourceOverrides(overrides map[string]v1alpha1.ResourceOverride) *Context {
c.t.Helper()
require.NoError(c.t, fixture.SetResourceOverrides(overrides))
c.T().Helper()
require.NoError(c.T(), fixture.SetResourceOverrides(overrides))
return c
}

func (c *Context) ResourceFilter(filter settings.ResourcesFilter) *Context {
c.t.Helper()
require.NoError(c.t, fixture.SetResourceFilter(filter))
c.T().Helper()
require.NoError(c.T(), fixture.SetResourceFilter(filter))
return c
}

@@ -445,14 +456,14 @@ func (c *Context) HelmSkipTests() *Context {
}

func (c *Context) SetTrackingMethod(trackingMethod string) *Context {
c.t.Helper()
require.NoError(c.t, fixture.SetTrackingMethod(trackingMethod))
c.T().Helper()
require.NoError(c.T(), fixture.SetTrackingMethod(trackingMethod))
return c
}

func (c *Context) SetInstallationID(installationID string) *Context {
c.t.Helper()
require.NoError(c.t, fixture.SetInstallationID(installationID))
c.T().Helper()
require.NoError(c.T(), fixture.SetInstallationID(installationID))
return c
}

@@ -466,7 +477,7 @@ func (c *Context) Sources(sources []v1alpha1.ApplicationSource) *Context {
}

func (c *Context) RegisterKustomizeVersion(version, path string) *Context {
c.t.Helper()
require.NoError(c.t, fixture.RegisterKustomizeVersion(version, path))
c.T().Helper()
require.NoError(c.T(), fixture.RegisterKustomizeVersion(version, path))
return c
}

test/e2e/fixture/app/context_cmp.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package app

import (
"os"
"path"
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/argoproj/argo-cd/v3/cmpserver/plugin"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
"github.com/argoproj/argo-cd/v3/util/errors"
)

// RunningCMPServer starts a CMP server with the given config directory and waits for it to be ready.
// It blocks until the CMP socket is created or times out after 10 seconds.
func (c *Context) RunningCMPServer(configFile string) *Context {
c.T().Helper()
startCMPServer(c.T(), configFile)
c.T().Setenv("ARGOCD_BINARY_NAME", "argocd")
return c
}

// startCMPServer starts the CMP server and waits for its socket to be ready.
// It blocks until the socket file is created or times out after 10 seconds.
func startCMPServer(t *testing.T, configDir string) {
t.Helper()
pluginSockFilePath := path.Join(fixture.TmpDir(), fixture.PluginSockFilePath)
t.Setenv("ARGOCD_BINARY_NAME", "argocd-cmp-server")
// ARGOCD_PLUGINSOCKFILEPATH should be set as the same value as repo server env var
t.Setenv("ARGOCD_PLUGINSOCKFILEPATH", pluginSockFilePath)
if _, err := os.Stat(pluginSockFilePath); os.IsNotExist(err) {
err := os.Mkdir(pluginSockFilePath, 0o700)
require.NoError(t, err)
}

// Read plugin config to get expected socket path
cfg, err := plugin.ReadPluginConfig(configDir)
require.NoError(t, err, "failed to read plugin config from %s", configDir)
expectedSocket := cfg.Address()

// Remove stale socket if it exists from a previous test run
if err := os.Remove(expectedSocket); err != nil && !os.IsNotExist(err) {
require.NoError(t, err, "failed to remove stale socket")
}

// Start CMP server in goroutine (non-blocking)
go func() {
errors.NewHandler(t).FailOnErr(fixture.RunWithStdin("", "", "../../dist/argocd", "--config-dir-path", configDir))
}()

// Wait for socket to be created
waitForSocket(t, expectedSocket, 10*time.Second)
}

// waitForSocket polls for a socket file to exist with exponential backoff
func waitForSocket(t *testing.T, socketPath string, timeout time.Duration) {
t.Helper()
deadline := time.Now().Add(timeout)

sleepIntervals := []time.Duration{
10 * time.Millisecond,
20 * time.Millisecond,
50 * time.Millisecond,
100 * time.Millisecond,
200 * time.Millisecond,
500 * time.Millisecond,
}
sleepIdx := 0

for time.Now().Before(deadline) {
if info, err := os.Stat(socketPath); err == nil {
if info.Mode()&os.ModeSocket != 0 {
return // Socket exists and is a socket!
}
}
if sleepIdx < len(sleepIntervals) {
time.Sleep(sleepIntervals[sleepIdx])
sleepIdx++
} else {
time.Sleep(500 * time.Millisecond)
}
}

t.Fatalf("CMP socket %s did not appear within %v", socketPath, timeout)
}
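A hypothetical test built on the new helper could look like the following; the config directory, path, and expectation are illustrative assumptions rather than part of this file:

func TestCMPApp(t *testing.T) {
	Given(t).
		// Start the CMP server and block until its socket is ready
		RunningCMPServer("./testdata/cmp-plugin").
		Path("cmp-app").
		When().
		CreateApp().
		Sync().
		Then().
		Expect(SyncStatusIs(v1alpha1.SyncStatusCodeSynced))
}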
@@ -272,9 +272,19 @@ func DoesNotExistNow() Expectation {
}
}

func App(predicate func(app *v1alpha1.Application) bool) Expectation {
return func(c *Consequences) (state, string) {
app := c.app().DeepCopy()
if predicate(app) {
return succeeded, "app predicate matches"
}
return pending, "app predicate does not match"
}
}

func Pod(predicate func(p corev1.Pod) bool) Expectation {
return func(_ *Consequences) (state, string) {
pods, err := pods()
return func(c *Consequences) (state, string) {
pods, err := pods(c.context.DeploymentNamespace())
if err != nil {
return failed, err.Error()
}
@@ -288,8 +298,8 @@ func Pod(predicate func(p corev1.Pod) bool) Expectation {
}

func NotPod(predicate func(p corev1.Pod) bool) Expectation {
return func(_ *Consequences) (state, string) {
pods, err := pods()
return func(c *Consequences) (state, string) {
pods, err := pods(c.context.DeploymentNamespace())
if err != nil {
return failed, err.Error()
}
@@ -302,9 +312,8 @@ func NotPod(predicate func(p corev1.Pod) bool) Expectation {
}
}

func pods() (*corev1.PodList, error) {
fixture.KubeClientset.CoreV1()
pods, err := fixture.KubeClientset.CoreV1().Pods(fixture.DeploymentNamespace()).List(context.Background(), metav1.ListOptions{})
func pods(namespace string) (*corev1.PodList, error) {
pods, err := fixture.KubeClientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
return pods, err
}

@@ -320,7 +329,6 @@ func NoNamespace(name string) Expectation {
}

func namespace(name string) (*corev1.Namespace, error) {
fixture.KubeClientset.CoreV1()
return fixture.KubeClientset.CoreV1().Namespaces().Get(context.Background(), name, metav1.GetOptions{})
}

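The new App expectation accepts an arbitrary predicate that is re-evaluated until it matches or the expectation times out. A sketch of how it might be used (the predicate body is an illustrative assumption):

func TestProjectAssignmentSticks(t *testing.T) {
	Given(t).
		Path("guestbook").
		When().
		CreateApp().
		Refresh(v1alpha1.RefreshTypeNormal).
		Then().
		Expect(App(func(app *v1alpha1.Application) bool {
			// Polled until true; the DeepCopy in App protects the cached object.
			return app.Spec.Project == "default"
		}))
}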
@@ -54,13 +54,13 @@ func (a *Actions) DoNotIgnoreErrors() *Actions {
|
||||
}
|
||||
|
||||
func (a *Actions) And(block func()) *Actions {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
block()
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Actions) Then() *Consequences {
|
||||
a.context.t.Helper()
|
||||
a.context.T().Helper()
|
||||
time.Sleep(fixture.WhenThenSleepInterval)
|
||||
return &Consequences{a.context, a}
|
||||
}
|
||||
@@ -80,8 +80,8 @@ func (a *Actions) SwitchToArgoCDNamespace() *Actions {
|
||||
// CreateClusterSecret creates a faux cluster secret, with the given cluster server and cluster name (this cluster
|
||||
// will not actually be used by the Argo CD controller, but that's not needed for our E2E tests)
|
||||
func (a *Actions) CreateClusterSecret(secretName string, clusterName string, clusterServer string) *Actions {
|
||||
a.context.t.Helper()
|
||||
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
|
||||
a.context.T().Helper()
|
||||
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())
|
||||
|
||||
var serviceAccountName string
|
||||
|
||||
@@ -154,8 +154,8 @@ func (a *Actions) CreateClusterSecret(secretName string, clusterName string, clu
|
||||
|
||||
// DeleteClusterSecret deletes a faux cluster secret
|
||||
func (a *Actions) DeleteClusterSecret(secretName string) *Actions {
|
||||
a.context.t.Helper()
|
||||
err := utils.GetE2EFixtureK8sClient(a.context.t).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Delete(context.Background(), secretName, metav1.DeleteOptions{})
|
||||
a.context.T().Helper()
|
||||
err := utils.GetE2EFixtureK8sClient(a.context.T()).KubeClientset.CoreV1().Secrets(fixture.TestNamespace()).Delete(context.Background(), secretName, metav1.DeleteOptions{})
|
||||
|
||||
a.describeAction = fmt.Sprintf("deleting cluster Secret '%s'", secretName)
|
||||
a.lastOutput, a.lastError = "", err
|
||||
@@ -166,8 +166,8 @@ func (a *Actions) DeleteClusterSecret(secretName string) *Actions {
|
||||
|
||||
// DeleteConfigMap deletes a faux cluster secret
|
||||
func (a *Actions) DeleteConfigMap(configMapName string) *Actions {
|
||||
a.context.t.Helper()
|
||||
err := utils.GetE2EFixtureK8sClient(a.context.t).KubeClientset.CoreV1().ConfigMaps(fixture.TestNamespace()).Delete(context.Background(), configMapName, metav1.DeleteOptions{})
|
||||
a.context.T().Helper()
|
||||
err := utils.GetE2EFixtureK8sClient(a.context.T()).KubeClientset.CoreV1().ConfigMaps(fixture.TestNamespace()).Delete(context.Background(), configMapName, metav1.DeleteOptions{})
|
||||
|
||||
a.describeAction = fmt.Sprintf("deleting configMap '%s'", configMapName)
|
||||
a.lastOutput, a.lastError = "", err
|
||||
@@ -178,8 +178,8 @@ func (a *Actions) DeleteConfigMap(configMapName string) *Actions {
|
||||
|
||||
// DeletePlacementDecision deletes a faux cluster secret
|
||||
func (a *Actions) DeletePlacementDecision(placementDecisionName string) *Actions {
|
||||
a.context.t.Helper()
|
||||
err := utils.GetE2EFixtureK8sClient(a.context.t).DynamicClientset.Resource(pdGVR).Namespace(fixture.TestNamespace()).Delete(context.Background(), placementDecisionName, metav1.DeleteOptions{})
|
||||
a.context.T().Helper()
|
||||
err := utils.GetE2EFixtureK8sClient(a.context.T()).DynamicClientset.Resource(pdGVR).Namespace(fixture.TestNamespace()).Delete(context.Background(), placementDecisionName, metav1.DeleteOptions{})
|
||||
|
||||
a.describeAction = fmt.Sprintf("deleting placement decision '%s'", placementDecisionName)
|
||||
a.lastOutput, a.lastError = "", err
|
||||
@@ -191,9 +191,9 @@ func (a *Actions) DeletePlacementDecision(placementDecisionName string) *Actions
|
||||
// Create a temporary namespace, from utils.ApplicationSet, for use by the test.
|
||||
// This namespace will be deleted on subsequent tests.
|
||||
func (a *Actions) CreateNamespace(namespace string) *Actions {
a.context.t.Helper()
a.context.T().Helper()

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

_, err := fixtureClient.KubeClientset.CoreV1().Namespaces().Create(context.Background(),
&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
@@ -207,9 +207,9 @@ func (a *Actions) CreateNamespace(namespace string) *Actions {

// Create creates an ApplicationSet using the provided value
func (a *Actions) Create(appSet v1alpha1.ApplicationSet) *Actions {
a.context.t.Helper()
a.context.T().Helper()

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

appSet.APIVersion = "argoproj.io/v1alpha1"
appSet.Kind = "ApplicationSet"
@@ -227,10 +227,12 @@ func (a *Actions) Create(appSet v1alpha1.ApplicationSet) *Actions {
appSetClientSet = fixtureClient.AppSetClientset
}

// AppSet name is not configurable and should always be unique, based on the context name
appSet.Name = a.context.GetName()

newResource, err := appSetClientSet.Create(context.Background(), utils.MustToUnstructured(&appSet), metav1.CreateOptions{})

if err == nil {
a.context.name = newResource.GetName()
a.context.namespace = newResource.GetNamespace()
}

@@ -243,8 +245,8 @@ func (a *Actions) Create(appSet v1alpha1.ApplicationSet) *Actions {

// Create Role/RoleBinding to allow ApplicationSet to list the PlacementDecisions
func (a *Actions) CreatePlacementRoleAndRoleBinding() *Actions {
a.context.t.Helper()
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
a.context.T().Helper()
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

var err error

@@ -293,9 +295,9 @@ func (a *Actions) CreatePlacementRoleAndRoleBinding() *Actions {

// Create a ConfigMap for the ClusterResourceList generator
func (a *Actions) CreatePlacementDecisionConfigMap(configMapName string) *Actions {
a.context.t.Helper()
a.context.T().Helper()

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

_, err := fixtureClient.KubeClientset.CoreV1().ConfigMaps(fixture.TestNamespace()).Get(context.Background(), configMapName, metav1.GetOptions{})

@@ -325,9 +327,9 @@ func (a *Actions) CreatePlacementDecisionConfigMap(configMapName string) *Action
}

func (a *Actions) CreatePlacementDecision(placementDecisionName string) *Actions {
a.context.t.Helper()
a.context.T().Helper()

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t).DynamicClientset
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T()).DynamicClientset

_, err := fixtureClient.Resource(pdGVR).Namespace(fixture.TestNamespace()).Get(
context.Background(),
@@ -363,9 +365,9 @@ func (a *Actions) CreatePlacementDecision(placementDecisionName string) *Actions
}

func (a *Actions) StatusUpdatePlacementDecision(placementDecisionName string, clusterList []any) *Actions {
a.context.t.Helper()
a.context.T().Helper()

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t).DynamicClientset
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T()).DynamicClientset
placementDecision, err := fixtureClient.Resource(pdGVR).Namespace(fixture.TestNamespace()).Get(
context.Background(),
placementDecisionName,
@@ -390,9 +392,9 @@ func (a *Actions) StatusUpdatePlacementDecision(placementDecisionName string, cl

// Delete deletes the ApplicationSet within the context
func (a *Actions) Delete() *Actions {
a.context.t.Helper()
a.context.T().Helper()

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

var appSetClientSet dynamic.ResourceInterface

@@ -408,8 +410,8 @@ func (a *Actions) Delete() *Actions {
}

deleteProp := metav1.DeletePropagationForeground
err := appSetClientSet.Delete(context.Background(), a.context.name, metav1.DeleteOptions{PropagationPolicy: &deleteProp})
a.describeAction = fmt.Sprintf("Deleting ApplicationSet '%s/%s' %v", a.context.namespace, a.context.name, err)
err := appSetClientSet.Delete(context.Background(), a.context.GetName(), metav1.DeleteOptions{PropagationPolicy: &deleteProp})
a.describeAction = fmt.Sprintf("Deleting ApplicationSet '%s/%s' %v", a.context.namespace, a.context.GetName(), err)
a.lastOutput, a.lastError = "", err
a.verifyAction()

@@ -420,7 +422,7 @@ func (a *Actions) Delete() *Actions {
func (a *Actions) get() (*v1alpha1.ApplicationSet, error) {
appSet := v1alpha1.ApplicationSet{}

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

var appSetClientSet dynamic.ResourceInterface

@@ -434,7 +436,7 @@ func (a *Actions) get() (*v1alpha1.ApplicationSet, error) {
appSetClientSet = fixtureClient.AppSetClientset
}

newResource, err := appSetClientSet.Get(context.Background(), a.context.name, metav1.GetOptions{})
newResource, err := appSetClientSet.Get(context.Background(), a.context.GetName(), metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -455,7 +457,7 @@ func (a *Actions) get() (*v1alpha1.ApplicationSet, error) {
// Update retrieves the latest copy of the ApplicationSet, then allows the caller to mutate it via 'toUpdate', with
// the result applied back to the cluster resource
func (a *Actions) Update(toUpdate func(*v1alpha1.ApplicationSet)) *Actions {
a.context.t.Helper()
a.context.T().Helper()

timeout := 30 * time.Second

@@ -483,7 +485,7 @@ func (a *Actions) Update(toUpdate func(*v1alpha1.ApplicationSet)) *Actions {
toUpdate(appSet)
a.describeAction = fmt.Sprintf("updating ApplicationSet '%s/%s'", appSet.Namespace, appSet.Name)

fixtureClient := utils.GetE2EFixtureK8sClient(a.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())

var appSetClientSet dynamic.ResourceInterface

@@ -515,7 +517,7 @@ func (a *Actions) Update(toUpdate func(*v1alpha1.ApplicationSet)) *Actions {
}

func (a *Actions) verifyAction() {
a.context.t.Helper()
a.context.T().Helper()

if a.describeAction != "" {
log.Infof("action: %s", a.describeAction)
@@ -528,7 +530,7 @@ func (a *Actions) verifyAction() {
}

func (a *Actions) AppSet(appName string, flags ...string) *Actions {
a.context.t.Helper()
a.context.T().Helper()
args := []string{"app", "set", appName}
args = append(args, flags...)
a.runCli(args...)
@@ -536,13 +538,13 @@ func (a *Actions) AppSet(appName string, flags ...string) *Actions {
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = fixture.RunCli(args...)
a.verifyAction()
}

func (a *Actions) AddSignedFile(fileName, fileContents string) *Actions {
a.context.t.Helper()
fixture.AddSignedFile(a.context.t, a.context.path+"/"+fileName, fileContents)
a.context.T().Helper()
fixture.AddSignedFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
return a
}

@@ -27,7 +27,7 @@ func (c *Consequences) Expect(e Expectation) *Consequences {

func (c *Consequences) ExpectWithDuration(e Expectation, timeout time.Duration) *Consequences {
// this invocation makes sure this func is not reported as the cause of the failure - we are a "test helper"
c.context.t.Helper()
c.context.T().Helper()
var message string
var state state
sleepIntervals := []time.Duration{
@@ -51,17 +51,17 @@ func (c *Consequences) ExpectWithDuration(e Expectation, timeout time.Duration)
log.Infof("expectation succeeded: %s", message)
return c
case failed:
c.context.t.Fatalf("failed expectation: %s", message)
c.context.T().Fatalf("failed expectation: %s", message)
return c
}
log.Infof("expectation pending: %s", message)
}
c.context.t.Fatal("timeout waiting for: " + message)
c.context.T().Fatal("timeout waiting for: " + message)
return c
}

func (c *Consequences) And(block func()) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block()
return c
}
@@ -88,7 +88,7 @@ func (c *Consequences) app(name string) *v1alpha1.Application {
}

func (c *Consequences) apps() []v1alpha1.Application {
c.context.t.Helper()
c.context.T().Helper()
var namespace string
if c.context.switchToNamespace != "" {
namespace = string(c.context.switchToNamespace)
@@ -96,7 +96,7 @@ func (c *Consequences) apps() []v1alpha1.Application {
namespace = fixture.TestNamespace()
}

fixtureClient := utils.GetE2EFixtureK8sClient(c.context.t)
fixtureClient := utils.GetE2EFixtureK8sClient(c.context.T())
list, err := fixtureClient.AppClientset.ArgoprojV1alpha1().Applications(namespace).List(context.Background(), metav1.ListOptions{})
errors.CheckError(err)

@@ -108,8 +108,8 @@ func (c *Consequences) apps() []v1alpha1.Application {
}

func (c *Consequences) applicationSet(applicationSetName string) *v1alpha1.ApplicationSet {
c.context.t.Helper()
fixtureClient := utils.GetE2EFixtureK8sClient(c.context.t)
c.context.T().Helper()
fixtureClient := utils.GetE2EFixtureK8sClient(c.context.T())

var appSetClientSet dynamic.ResourceInterface

@@ -119,7 +119,7 @@ func (c *Consequences) applicationSet(applicationSetName string) *v1alpha1.Appli
appSetClientSet = fixtureClient.AppSetClientset
}

list, err := appSetClientSet.Get(context.Background(), c.actions.context.name, metav1.GetOptions{})
list, err := appSetClientSet.Get(context.Background(), applicationSetName, metav1.GetOptions{})
errors.CheckError(err)

var appSet v1alpha1.ApplicationSet

@@ -12,10 +12,8 @@ import (

// Context implements the "given" part of given/when/then
type Context struct {
t *testing.T
*fixture.TestState

// name is the ApplicationSet's name, created by a Create action
name string
namespace string
switchToNamespace utils.ExternalNamespace
path string
@@ -23,8 +21,13 @@ type Context struct {

func Given(t *testing.T) *Context {
t.Helper()

state := fixture.EnsureCleanState(t)

// TODO: Appset EnsureCleanState specific logic should be moved to the main EnsureCleanState function (https://github.com/argoproj/argo-cd/issues/24307)
utils.EnsureCleanState(t)
return &Context{t: t}

return &Context{TestState: state}
}

func (c *Context) When() *Actions {
@@ -48,11 +51,11 @@ func (c *Context) Path(path string) *Context {
}

func (c *Context) GPGPublicKeyAdded() *Context {
gpgkeys.AddGPGPublicKey(c.t)
gpgkeys.AddGPGPublicKey(c.T())
return c
}

func (c *Context) HTTPSInsecureRepoURLAdded(project string) *Context {
repos.AddHTTPSRepo(c.t, true, true, project, fixture.RepoURLTypeHTTPS)
repos.AddHTTPSRepo(c.T(), true, true, project, fixture.RepoURLTypeHTTPS)
return c
}

@@ -3,6 +3,7 @@ package applicationsets
import (
"fmt"
"reflect"
"slices"
"strings"
"testing"

@@ -81,12 +82,12 @@ func ApplicationsExist(expectedApps []v1alpha1.Application) Expectation {

// ApplicationSetHasConditions checks whether each of the 'expectedConditions' exists in the ApplicationSet status and is
// equivalent to the provided value.
func ApplicationSetHasConditions(applicationSetName string, expectedConditions []v1alpha1.ApplicationSetCondition) Expectation {
func ApplicationSetHasConditions(expectedConditions []v1alpha1.ApplicationSetCondition) Expectation {
return func(c *Consequences) (state, string) {
// retrieve the application set
foundApplicationSet := c.applicationSet(applicationSetName)
foundApplicationSet := c.applicationSet(c.context.GetName())
if foundApplicationSet == nil {
return pending, fmt.Sprintf("application set '%s' not found", applicationSetName)
return pending, fmt.Sprintf("application set '%s' not found", c.context.GetName())
}

if !conditionsAreEqual(&expectedConditions, &foundApplicationSet.Status.Conditions) {
@@ -225,3 +226,95 @@ func appsAreEqual(one v1alpha1.Application, two v1alpha1.Application) bool {
func conditionsAreEqual(one, two *[]v1alpha1.ApplicationSetCondition) bool {
return reflect.DeepEqual(filterConditionFields(one), filterConditionFields(two))
}

// CheckProgressiveSyncStatusCodeOfApplications checks whether the progressive sync status codes of applications in ApplicationSetApplicationStatus
// match the expected values.
func CheckProgressiveSyncStatusCodeOfApplications(expectedStatuses map[string]v1alpha1.ApplicationSetApplicationStatus) Expectation {
return func(c *Consequences) (state, string) {
appSet := c.applicationSet(c.context.GetName())
if appSet == nil {
return pending, fmt.Sprintf("no ApplicationSet found with name '%s'", c.context.GetName())
}
if appSet.Status.ApplicationStatus == nil {
return pending, fmt.Sprintf("no application status found for ApplicationSet '%s'", c.context.GetName())
}
for _, appStatus := range appSet.Status.ApplicationStatus {
expectedstatus, found := expectedStatuses[appStatus.Application]
if !found {
continue // Appset has more apps than expected - not ideal
}
if appStatus.Status != expectedstatus.Status {
return pending, fmt.Sprintf("for application '%s': expected status '%s' but got '%s'", expectedstatus.Application, expectedstatus.Status, appStatus.Status)
}
}
return succeeded, fmt.Sprintf("all applications in ApplicationSet's: '%s' Application Status have expected statuses ", c.context.GetName())
}
}

// CheckApplicationInRightSteps checks that a step contains exactly the expected applications.
func CheckApplicationInRightSteps(step string, expectedApps []string) Expectation {
return func(c *Consequences) (state, string) {
appSet := c.applicationSet(c.context.GetName())
if appSet == nil {
return pending, fmt.Sprintf("no application set found with name '%s'", c.context.GetName())
}
if appSet.Status.ApplicationStatus == nil {
return pending, fmt.Sprintf("no application status found for ApplicationSet '%s'", c.context.GetName())
}
var stepApps []string
for _, appStatus := range appSet.Status.ApplicationStatus {
if appStatus.Step == step {
stepApps = append(stepApps, appStatus.Application)
}
}
if len(stepApps) != len(expectedApps) {
return pending, fmt.Sprintf("expected %d apps in step '%s' for appset '%s', but got %d", len(expectedApps), step, c.context.GetName(), len(stepApps))
}
// Sort before comparing to avoid flakiness
slices.Sort(stepApps)
slices.Sort(expectedApps)
if !slices.Equal(stepApps, expectedApps) {
return pending, fmt.Sprintf("In step '%s', expected apps: '%s', but got: '%s'", step, expectedApps, stepApps)
}
return succeeded, fmt.Sprintf("Step '%s' has expected apps: '%s'", step, expectedApps)
}
}

// ApplicationSetDoesNotHaveApplicationStatus checks that ApplicationSet.Status.ApplicationStatus is nil
func ApplicationSetDoesNotHaveApplicationStatus() Expectation {
return func(c *Consequences) (state, string) {
appSet := c.applicationSet(c.context.GetName())
if appSet == nil {
return pending, fmt.Sprintf("no application set found with name '%s'", c.context.GetName())
}
if appSet.Status.ApplicationStatus != nil {
return failed, fmt.Sprintf("application set '%s' has ApplicationStatus when not expected", c.context.GetName())
}
return succeeded, fmt.Sprintf("Application '%s' does not have ApplicationStatus", c.context.GetName())
}
}

// ApplicationSetHasApplicationStatus checks that ApplicationSet has expected number of applications in its status
// and all have progressive sync status Healthy.
func ApplicationSetHasApplicationStatus(expectedApplicationStatusLength int) Expectation {
return func(c *Consequences) (state, string) {
appSet := c.applicationSet(c.context.GetName())
if appSet == nil {
return pending, fmt.Sprintf("no application set found with name '%s'", c.context.GetName())
}
if appSet.Status.ApplicationStatus == nil {
return pending, fmt.Sprintf("application set '%s' has no ApplicationStatus when '%d' expected", c.context.GetName(), expectedApplicationStatusLength)
}

if len(appSet.Status.ApplicationStatus) != expectedApplicationStatusLength {
return failed, fmt.Sprintf("applicationset has '%d' applicationstatus, when '%d' are expected", len(appSet.Status.ApplicationStatus), expectedApplicationStatusLength)
}

for _, appStatus := range appSet.Status.ApplicationStatus {
if appStatus.Status != v1alpha1.ProgressiveSyncHealthy {
return pending, fmt.Sprintf("Application '%s' not Healthy", appStatus.Application)
}
}
return succeeded, fmt.Sprintf("All Applications in ApplicationSet: '%s' are Healthy ", c.context.GetName())
}
}

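Editor's note: a minimal sketch of how these new expectations compose with the fixture's Given/When/Then chain; the appSet value, step label, and app names are hypothetical and not taken from this diff:

// A minimal sketch, assuming 'appSet' is a v1alpha1.ApplicationSet built elsewhere in the test.
func TestProgressiveSyncExpectations(t *testing.T) {
	Given(t).
		When().
		Create(appSet). // hypothetical ApplicationSet value
		Then().
		Expect(ApplicationSetHasApplicationStatus(2)).
		Expect(CheckApplicationInRightSteps("1", []string{"app-one", "app-two"}))
}
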
@@ -1,25 +0,0 @@
package utils

import (
"context"
"os"
"os/exec"
"strings"

argoexec "github.com/argoproj/argo-cd/v3/util/exec"
)

func Run(workDir, name string, args ...string) (string, error) {
return RunWithStdin("", workDir, name, args...)
}

func RunWithStdin(stdin, workDir, name string, args ...string) (string, error) {
cmd := exec.CommandContext(context.Background(), name, args...)
if stdin != "" {
cmd.Stdin = strings.NewReader(stdin)
}
cmd.Env = os.Environ()
cmd.Dir = workDir

return argoexec.RunCommandExt(cmd, argoexec.CmdOpts{})
}
@@ -28,7 +28,6 @@ import (
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
"github.com/argoproj/argo-cd/v3/util/errors"
)

type ExternalNamespace string
@@ -49,7 +48,6 @@ const (
// Note: this is NOT the namespace the ApplicationSet controller is deployed to; see ArgoCDNamespace.
ApplicationsResourcesNamespace = "applicationset-e2e"

TmpDir = "/tmp/applicationset-e2e"
TestingLabel = "e2e.argoproj.io"
)

@@ -141,20 +139,20 @@ func EnsureCleanState(t *testing.T) {
return nil
},
func() error {
// Delete the argocd-e2e-external namespace, if it exists
err := fixtureClient.KubeClientset.CoreV1().Namespaces().Delete(t.Context(), string(ArgoCDExternalNamespace), metav1.DeleteOptions{PropagationPolicy: &policy})
if err != nil && !apierrors.IsNotFound(err) { // 'not found' error is expected
return err
}
return nil
// Clean up ApplicationSets in argocd-e2e-external namespace (don't delete the namespace itself as it's shared)
return fixtureClient.ExternalAppSetClientsets[ArgoCDExternalNamespace].DeleteCollection(t.Context(), metav1.DeleteOptions{PropagationPolicy: &policy}, metav1.ListOptions{})
},
func() error {
// Delete the argocd-e2e-external namespace, if it exists
err := fixtureClient.KubeClientset.CoreV1().Namespaces().Delete(t.Context(), string(ArgoCDExternalNamespace2), metav1.DeleteOptions{PropagationPolicy: &policy})
if err != nil && !apierrors.IsNotFound(err) { // 'not found' error is expected
return err
}
return nil
// Clean up ApplicationSets in argocd-e2e-external-2 namespace (don't delete the namespace itself as it's shared)
return fixtureClient.ExternalAppSetClientsets[ArgoCDExternalNamespace2].DeleteCollection(t.Context(), metav1.DeleteOptions{PropagationPolicy: &policy}, metav1.ListOptions{})
},
func() error {
// Clean up Applications in argocd-e2e-external namespace
return fixtureClient.AppClientset.ArgoprojV1alpha1().Applications(string(ArgoCDExternalNamespace)).DeleteCollection(t.Context(), metav1.DeleteOptions{PropagationPolicy: &policy}, metav1.ListOptions{})
},
func() error {
// Clean up Applications in argocd-e2e-external-2 namespace
return fixtureClient.AppClientset.ArgoprojV1alpha1().Applications(string(ArgoCDExternalNamespace2)).DeleteCollection(t.Context(), metav1.DeleteOptions{PropagationPolicy: &policy}, metav1.ListOptions{})
},
// delete resources
func() error {
@@ -209,12 +207,6 @@ func EnsureCleanState(t *testing.T) {

require.NoError(t, waitForExpectedClusterState(t))

// remove tmp dir
require.NoError(t, os.RemoveAll(TmpDir))

// create tmp dir
errors.NewHandler(t).FailOnErr(Run("", "mkdir", "-p", TmpDir))

// A previous test may have switched to a non-admin user; reset the login to admin
require.NoError(t, fixture.LoginAs("admin"))

@@ -265,7 +257,9 @@ func waitForExpectedClusterState(t *testing.T) error {
}

// Wait up to 120 seconds for namespace to not exist
for _, namespace := range []string{string(ApplicationsResourcesNamespace), string(ArgoCDExternalNamespace), string(ArgoCDExternalNamespace2)} {
// Note: We only check ApplicationsResourcesNamespace - the external namespaces (argocd-e2e-external*)
// are shared infrastructure and persist throughout the test suite
for _, namespace := range []string{string(ApplicationsResourcesNamespace)} {
// Wait up to 120 seconds for namespace to not exist
if err := waitForSuccess(func() error {
return cleanUpNamespace(fixtureClient, namespace)

@@ -13,6 +13,8 @@ import (

// Add a custom CA certificate to the test and also create the certificate file
// on the file system, so argocd-server and argocd-repo-server can use it.
// TODO: Should be moved to the EnsureCleanState since this acts on the controller
// globally https://github.com/argoproj/argo-cd/issues/24307
func AddCustomCACert(t *testing.T) {
t.Helper()
caCertPath, err := filepath.Abs("../fixture/certs/argocd-test-ca.crt")
@@ -21,18 +23,18 @@ func AddCustomCACert(t *testing.T) {
// against a local workload (repositories available as localhost) and
// against remote workloads (repositories available as argocd-e2e-server)
if fixture.IsLocal() {
args := []string{"cert", "add-tls", "localhost", "--from", caCertPath}
args := []string{"cert", "add-tls", "localhost", "--upsert", "--from", caCertPath}
errors.NewHandler(t).FailOnErr(fixture.RunCli(args...))
args = []string{"cert", "add-tls", "127.0.0.1", "--from", caCertPath}
args = []string{"cert", "add-tls", "127.0.0.1", "--upsert", "--from", caCertPath}
errors.NewHandler(t).FailOnErr(fixture.RunCli(args...))
certData, err := os.ReadFile(caCertPath)
require.NoError(t, err)
err = os.WriteFile(fixture.TmpDir+"/app/config/tls/localhost", certData, 0o644)
err = os.WriteFile(fixture.TmpDir()+"/app/config/tls/localhost", certData, 0o644)
require.NoError(t, err)
err = os.WriteFile(fixture.TmpDir+"/app/config/tls/127.0.0.1", certData, 0o644)
err = os.WriteFile(fixture.TmpDir()+"/app/config/tls/127.0.0.1", certData, 0o644)
require.NoError(t, err)
} else {
args := []string{"cert", "add-tls", "argocd-e2e-server", "--from", caCertPath}
args := []string{"cert", "add-tls", "argocd-e2e-server", "--upsert", "--from", caCertPath}
errors.NewHandler(t).FailOnErr(fixture.RunCli(args...))
fixture.RestartAPIServer(t)
fixture.RestartRepoServer(t)
@@ -56,7 +58,7 @@ func AddCustomSSHKnownHostsKeys(t *testing.T) {
if fixture.IsLocal() {
knownHostsData, err := os.ReadFile(knownHostsPath)
require.NoError(t, err)
err = os.WriteFile(fixture.TmpDir+"/app/config/ssh/ssh_known_hosts", knownHostsData, 0o644)
err = os.WriteFile(fixture.TmpDir()+"/app/config/ssh/ssh_known_hosts", knownHostsData, 0o644)
require.NoError(t, err)
} else {
fixture.RestartAPIServer(t)

@@ -4,11 +4,13 @@ import (
"context"
"errors"
"log"
"os"
"strings"
"time"

"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
@@ -45,7 +47,7 @@ func (a *Actions) Create() *Actions {
_, err := clusterClient.Create(context.Background(), &clusterpkg.ClusterCreateRequest{
Cluster: &v1alpha1.Cluster{
Server: a.context.server,
Name: a.context.name,
Name: a.context.GetName(),
Config: v1alpha1.ClusterConfig{BearerToken: a.context.bearerToken},
ConnectionState: v1alpha1.ConnectionState{},
ServerVersion: "",
@@ -89,54 +91,106 @@ func (a *Actions) CreateWithRBAC() *Actions {
return a
}

// Create a kubeconfig with the current cluster name
err = a.createKubeconfigForCluster(config, a.context.GetName())
if err != nil {
a.lastError = err
return a
}

return a.Create()
}

// Helper function to create a kubeconfig file with the given cluster name
func (a *Actions) createKubeconfigForCluster(config *clientcmdapi.Config, newClusterName string) error {
// Get the current context
currentContext := config.Contexts[config.CurrentContext]
if currentContext == nil {
return errors.New("no current context found")
}

// Get the original cluster
originalCluster := config.Clusters[currentContext.Cluster]
if originalCluster == nil {
return errors.New("cluster not found in config")
}

// Create a new cluster entry with the same config but different name
newCluster := originalCluster.DeepCopy()
config.Clusters[newClusterName] = newCluster

// Create a new context pointing to the new cluster
newContext := currentContext.DeepCopy()
newContext.Cluster = newClusterName
config.Contexts[newClusterName] = newContext

// Set the new context as current
config.CurrentContext = newClusterName

// Write to a temporary kubeconfig file
tmpFile, err := os.CreateTemp("", "kubeconfig-*.yaml")
if err != nil {
return err
}
defer tmpFile.Close()

// Write the modified config to the temp file
if err := clientcmd.WriteToFile(*config, tmpFile.Name()); err != nil {
return err
}

// Set the KUBECONFIG environment variable to use this temp file
// This will be used by subsequent kubectl/argocd commands to connect to the cluster
a.context.T().Setenv("KUBECONFIG", tmpFile.Name())

return nil
}

func (a *Actions) List() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("cluster", "list")
return a
}

func (a *Actions) Get() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("cluster", "get", a.context.server)
return a
}

func (a *Actions) GetByName(name string) *Actions {
a.context.t.Helper()
a.runCli("cluster", "get", name)
func (a *Actions) GetByName() *Actions {
a.context.T().Helper()
a.runCli("cluster", "get", a.context.GetName())
return a
}

func (a *Actions) SetNamespaces() *Actions {
a.context.t.Helper()
a.runCli("cluster", "set", a.context.name, "--namespace", strings.Join(a.context.namespaces, ","))
a.context.T().Helper()
a.runCli("cluster", "set", a.context.GetName(), "--namespace", strings.Join(a.context.namespaces, ","))
return a
}

func (a *Actions) DeleteByName() *Actions {
a.context.t.Helper()
a.context.T().Helper()

a.runCli("cluster", "rm", a.context.name, "--yes")
a.runCli("cluster", "rm", a.context.GetName(), "--yes")
return a
}

func (a *Actions) DeleteByServer() *Actions {
a.context.t.Helper()
a.context.T().Helper()

a.runCli("cluster", "rm", a.context.server, "--yes")
return a
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
time.Sleep(fixture.WhenThenSleepInterval)
return &Consequences{a.context, a}
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = fixture.RunCli(args...)
}

@@ -21,13 +21,13 @@ func (c *Consequences) Expect() *Consequences {
}

func (c *Consequences) And(block func(cluster *v1alpha1.Cluster, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.cluster())
return c
}

func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.actions.lastOutput, c.actions.lastError)
return c
}

@@ -7,10 +7,11 @@ import (
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

// this implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
name string
*fixture.TestState

project string
server string
upsert bool
@@ -20,21 +21,19 @@ type Context struct {

func Given(t *testing.T) *Context {
t.Helper()
fixture.EnsureCleanState(t)
return GivenWithSameState(t)
state := fixture.EnsureCleanState(t)
return GivenWithSameState(state)
}

func GivenWithSameState(t *testing.T) *Context {
t.Helper()
return &Context{t: t, name: fixture.Name(), project: "default"}
}

func (c *Context) GetName() string {
return c.name
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
return &Context{TestState: fixture.NewTestStateFromContext(ctx), project: "default"}
}

func (c *Context) Name(name string) *Context {
c.name = name
c.SetName(name)
return c
}
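Editor's note: a minimal sketch of the intended call pattern for the new GivenWithSameState signature, assuming a test that needs two fixture contexts sharing one TestState; the test name is hypothetical:

func TestClusterWithSharedState(t *testing.T) {
	// Given runs EnsureCleanState once and returns a context that owns the TestState.
	ctx := Given(t)
	// A second context reuses the same state (same ID, name, and namespace) without re-cleaning.
	ctx2 := GivenWithSameState(ctx)
	_ = ctx2
}
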

119
test/e2e/fixture/context.go
Normal file
@@ -0,0 +1,119 @@
package fixture

import (
"fmt"
"strings"
"testing"

"github.com/argoproj/argo-cd/v3/util/rand"

"github.com/argoproj/argo-cd/v3/util/errors"
)

// TestContext defines the interface for test-specific state that enables parallel test execution.
// All fixture Context types should implement this interface by embedding TestState.
type TestContext interface {
// SetName sets the DNS-friendly name for this context
SetName(name string)
// GetName returns the DNS-friendly name for this context
GetName() string
// DeploymentNamespace returns the namespace where test resources are deployed
DeploymentNamespace() string
// ID returns the unique identifier for this test run
ID() string
// ShortID returns the short unique identifier suffix for this test run
ShortID() string
// Token returns the authentication token for API calls
Token() string
// SetToken sets the authentication token
SetToken(token string)
// T returns the testing.T instance for this test
T() *testing.T
}

// TestState holds test-specific variables that were previously global.
// Embed this in Context structs to enable parallel test execution.
type TestState struct {
t *testing.T
id string
shortId string
name string
deploymentNamespace string
token string
}

// NewTestState creates a new TestState with unique identifiers for this test run.
// This generates fresh id, name, and deploymentNamespace values.
func NewTestState(t *testing.T) *TestState {
t.Helper()
randString, err := rand.String(5)
errors.CheckError(err)
shortId := strings.ToLower(randString)

return &TestState{
t: t,
token: token, // Initialize with current global token
id: fmt.Sprintf("%s-%s", t.Name(), shortId),
shortId: shortId,
name: DnsFriendly(t.Name(), "-"+shortId),
deploymentNamespace: DnsFriendly("argocd-e2e-"+t.Name(), "-"+shortId),
}
}

// NewTestStateFromContext creates a TestState from an existing TestContext.
// This allows GivenWithSameState functions to work across different Context types.
func NewTestStateFromContext(ctx TestContext) *TestState {
return &TestState{
t: ctx.T(),
id: ctx.ID(),
shortId: ctx.ShortID(),
name: ctx.GetName(),
deploymentNamespace: ctx.DeploymentNamespace(),
token: ctx.Token(),
}
}

// SetName sets the DNS-friendly name for this context
func (s *TestState) SetName(name string) {
if name == "" {
s.name = ""
return
}
suffix := "-" + s.shortId
s.name = DnsFriendly(strings.TrimSuffix(name, suffix), suffix)
}

// GetName returns the DNS-friendly name for this context
func (s *TestState) GetName() string {
return s.name
}

// DeploymentNamespace returns the namespace where test resources are deployed
func (s *TestState) DeploymentNamespace() string {
return s.deploymentNamespace
}

// ID returns the unique identifier for this test run
func (s *TestState) ID() string {
return s.id
}

// ShortID returns the short unique identifier suffix for this test run
func (s *TestState) ShortID() string {
return s.shortId
}

// Token returns the authentication token for API calls
func (s *TestState) Token() string {
return s.token
}

// SetToken sets the authentication token
func (s *TestState) SetToken(token string) {
s.token = token
}

// T returns the testing.T instance for this test
func (s *TestState) T() *testing.T {
return s.t
}
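Editor's note: because each TestState derives its name and namespace from t.Name() plus a random suffix, two tests hold disjoint state and can run in parallel. A minimal sketch (the test bodies are hypothetical):

func TestParallelA(t *testing.T) {
	t.Parallel()
	state := NewTestState(t) // e.g. name "testparallela-ab1cd", namespace "argocd-e2e-testparallela-ab1cd"
	_ = state.DeploymentNamespace()
}

func TestParallelB(t *testing.T) {
	t.Parallel()
	state := NewTestState(t) // gets its own suffix, so it cannot collide with TestParallelA
	_ = state.DeploymentNamespace()
}
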
@@ -14,17 +14,19 @@ import (
"testing"
"time"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"

jsonpatch "github.com/evanphx/json-patch"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/yaml"

"github.com/argoproj/argo-cd/v3/common"
@@ -36,7 +38,6 @@ import (
"github.com/argoproj/argo-cd/v3/util/errors"
grpcutil "github.com/argoproj/argo-cd/v3/util/grpc"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/rand"
"github.com/argoproj/argo-cd/v3/util/settings"
)

@@ -53,7 +54,8 @@ const (
defaultNotificationServer = "localhost:9001"

// ensure all repos are in one directory tree, so we can easily clean them up
TmpDir = "/tmp/argo-e2e"
// TmpDir can be overridden via ARGOCD_E2E_DIR environment variable
defaultTmpDir = "/tmp/argo-e2e"
repoDir = "testdata.git"
submoduleDir = "submodule.git"
submoduleParentDir = "submoduleParent.git"
@@ -65,7 +67,9 @@ const (
// cmp plugin sock file path
PluginSockFilePath = "/app/config/plugin"

E2ETestPrefix = "e2e-test-"
// finalizer to add to resources during tests. Make sure that the resource Kind of your object
// is included in the test EnsureCleanState code
TestFinalizer = TestingLabel + "/finalizer"

// Account for batch events processing (set to 1ms in e2e tests)
WhenThenSleepInterval = 5 * time.Millisecond
@@ -82,9 +86,6 @@ const (
)

var (
id string
deploymentNamespace string
name string
KubeClientset kubernetes.Interface
KubeConfig *rest.Config
DynamicClientset dynamic.Interface
@@ -148,6 +149,12 @@ func AppNamespace() string {
return GetEnvWithDefault("ARGOCD_E2E_APP_NAMESPACE", ArgoCDAppNamespace)
}

// TmpDir returns the base directory for e2e test data.
// It can be overridden via the ARGOCD_E2E_DIR environment variable.
func TmpDir() string {
return GetEnvWithDefault("ARGOCD_E2E_DIR", defaultTmpDir)
}

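Editor's note: a minimal sketch of the override behavior of the new TmpDir() accessor from a test; the directory value is illustrative only:

func TestTmpDirOverride(t *testing.T) {
	t.Setenv("ARGOCD_E2E_DIR", "/mnt/scratch/argo-e2e") // illustrative path
	if got := TmpDir(); got != "/mnt/scratch/argo-e2e" {
		t.Fatalf("expected the environment override to win, got %s", got)
	}
}
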
// getKubeConfig creates new kubernetes client config using specified config path and config overrides variables
func getKubeConfig(configPath string, overrides clientcmd.ConfigOverrides) *rest.Config {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
@@ -290,20 +297,16 @@ func LoginAs(username string) error {
return loginAs(username, password)
}

func Name() string {
return name
}

func repoDirectory() string {
return path.Join(TmpDir, repoDir)
return path.Join(TmpDir(), repoDir)
}

func submoduleDirectory() string {
return path.Join(TmpDir, submoduleDir)
return path.Join(TmpDir(), submoduleDir)
}

func submoduleParentDirectory() string {
return path.Join(TmpDir, submoduleParentDir)
return path.Join(TmpDir(), submoduleParentDir)
}

const (
@@ -320,16 +323,18 @@ const (
)

func RepoURL(urlType RepoURLType) string {
// SSH URLs use the container path (defaultTmpDir) because sshd runs inside Docker
// where $ARGOCD_E2E_DIR is mounted to /tmp/argo-e2e
switch urlType {
// Git server via SSH
case RepoURLTypeSSH:
return GetEnvWithDefault(EnvRepoURLTypeSSH, "ssh://root@localhost:2222/tmp/argo-e2e/testdata.git")
return GetEnvWithDefault(EnvRepoURLTypeSSH, "ssh://root@localhost:2222"+defaultTmpDir+"/testdata.git")
// Git submodule repo
case RepoURLTypeSSHSubmodule:
return GetEnvWithDefault(EnvRepoURLTypeSSHSubmodule, "ssh://root@localhost:2222/tmp/argo-e2e/submodule.git")
return GetEnvWithDefault(EnvRepoURLTypeSSHSubmodule, "ssh://root@localhost:2222"+defaultTmpDir+"/submodule.git")
// Git submodule parent repo
case RepoURLTypeSSHSubmoduleParent:
return GetEnvWithDefault(EnvRepoURLTypeSSHSubmoduleParent, "ssh://root@localhost:2222/tmp/argo-e2e/submoduleParent.git")
return GetEnvWithDefault(EnvRepoURLTypeSSHSubmoduleParent, "ssh://root@localhost:2222"+defaultTmpDir+"/submoduleParent.git")
// Git server via HTTPS
case RepoURLTypeHTTPS:
return GetEnvWithDefault(EnvRepoURLTypeHTTPS, "https://localhost:9443/argo-e2e/testdata.git")
@@ -363,10 +368,6 @@ func RepoBaseURL(urlType RepoURLType) string {
return path.Base(RepoURL(urlType))
}

func DeploymentNamespace() string {
return deploymentNamespace
}

// Convenience wrapper for updating argocd-cm
func updateSettingConfigMap(updater func(cm *corev1.ConfigMap) error) error {
return updateGenericConfigMap(common.ArgoCDConfigMapName, updater)
@@ -478,50 +479,6 @@ func SetImpersonationEnabled(impersonationEnabledFlag string) error {
})
}

func CreateRBACResourcesForImpersonation(serviceAccountName string, policyRules []rbacv1.PolicyRule) error {
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
_, err := KubeClientset.CoreV1().ServiceAccounts(DeploymentNamespace()).Create(context.Background(), sa, metav1.CreateOptions{})
if err != nil {
return err
}
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", serviceAccountName, "role"),
},
Rules: policyRules,
}
_, err = KubeClientset.RbacV1().Roles(DeploymentNamespace()).Create(context.Background(), role, metav1.CreateOptions{})
if err != nil {
return err
}
rolebinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", serviceAccountName, "rolebinding"),
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: fmt.Sprintf("%s-%s", serviceAccountName, "role"),
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: DeploymentNamespace(),
},
},
}
_, err = KubeClientset.RbacV1().RoleBindings(DeploymentNamespace()).Create(context.Background(), rolebinding, metav1.CreateOptions{})
if err != nil {
return err
}
return nil
}

func SetResourceOverridesSplitKeys(overrides map[string]v1alpha1.ResourceOverride) error {
return updateSettingConfigMap(func(cm *corev1.ConfigMap) error {
for k, v := range overrides {
@@ -648,7 +605,7 @@ func WithTestData(testdata string) TestOption {
}
}

func EnsureCleanState(t *testing.T, opts ...TestOption) {
func EnsureCleanState(t *testing.T, opts ...TestOption) *TestState {
t.Helper()
opt := newTestOption(opts...)
// In large scenarios, we can skip tests that have already run
@@ -658,9 +615,51 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
RecordTestRun(t)
})

// Create TestState to hold test-specific variables
state := NewTestState(t)

start := time.Now()
policy := metav1.DeletePropagationBackground

deleteNamespaces := func(namespaces []corev1.Namespace, wait bool) error {
args := []string{"delete", "ns", "--ignore-not-found=true", fmt.Sprintf("--wait=%t", wait)}
for _, namespace := range namespaces {
args = append(args, namespace.Name)
}
_, err := Run("", "kubectl", args...)
if err != nil {
return err
}
return nil
}

deleteResourceWithTestFinalizer := func(namespaces []corev1.Namespace, gvrs []schema.GroupVersionResource) error {
for _, namespace := range namespaces {
for _, gvr := range gvrs {
objects, err := DynamicClientset.Resource(gvr).Namespace(namespace.GetName()).List(t.Context(), metav1.ListOptions{})
if err != nil {
return err
}
for i := range objects.Items {
obj := &objects.Items[i]
updated := controllerutil.RemoveFinalizer(obj, TestFinalizer)
if updated {
log.WithFields(log.Fields{
"namespace": namespace.GetName(),
"resource": gvr,
"name": obj.GetName(),
}).Info("removing test finalizer")
_, err := DynamicClientset.Resource(gvr).Namespace(namespace.GetName()).Update(t.Context(), obj, metav1.UpdateOptions{})
if err != nil {
return err
}
}
}
}
}
return nil
}

RunFunctionsInParallelAndCheckErrors(t, []func() error{
func() error {
// kubectl delete apps ...
@@ -697,6 +696,20 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
metav1.DeleteOptions{PropagationPolicy: &policy},
metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeRepoCreds})
},
func() error {
// kubectl delete secrets -l argocd.argoproj.io/secret-type=repository-write
return KubeClientset.CoreV1().Secrets(TestNamespace()).DeleteCollection(
t.Context(),
metav1.DeleteOptions{PropagationPolicy: &policy},
metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeRepositoryWrite})
},
func() error {
// kubectl delete secrets -l argocd.argoproj.io/secret-type=repo-write-creds
return KubeClientset.CoreV1().Secrets(TestNamespace()).DeleteCollection(
t.Context(),
metav1.DeleteOptions{PropagationPolicy: &policy},
metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeRepoCredsWrite})
},
func() error {
// kubectl delete secrets -l argocd.argoproj.io/secret-type=cluster
return KubeClientset.CoreV1().Secrets(TestNamespace()).DeleteCollection(
@@ -711,8 +724,21 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
metav1.DeleteOptions{PropagationPolicy: &policy},
metav1.ListOptions{LabelSelector: TestingLabel + "=true"})
},
func() error {
// kubectl delete clusterroles -l e2e.argoproj.io=true
return KubeClientset.RbacV1().ClusterRoles().DeleteCollection(
t.Context(),
metav1.DeleteOptions{PropagationPolicy: &policy},
metav1.ListOptions{LabelSelector: TestingLabel + "=true"})
},
func() error {
// kubectl delete clusterrolebindings -l e2e.argoproj.io=true
return KubeClientset.RbacV1().ClusterRoleBindings().DeleteCollection(
t.Context(),
metav1.DeleteOptions{PropagationPolicy: &policy},
metav1.ListOptions{LabelSelector: TestingLabel + "=true"})
},
})

RunFunctionsInParallelAndCheckErrors(t, []func() error{
func() error {
// delete old namespaces which were created by tests
@@ -720,37 +746,33 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
t.Context(),
metav1.ListOptions{
LabelSelector: TestingLabel + "=true",
FieldSelector: "status.phase=Active",
},
)
if err != nil {
return err
}
if len(namespaces.Items) > 0 {
args := []string{"delete", "ns", "--wait=false"}
for _, namespace := range namespaces.Items {
args = append(args, namespace.Name)
}
_, err := Run("", "kubectl", args...)
err = deleteNamespaces(namespaces.Items, false)
if err != nil {
return err
}
}

namespaces, err = KubeClientset.CoreV1().Namespaces().List(t.Context(), metav1.ListOptions{})
// Get all namespaces stuck in Terminating state
terminatingNamespaces, err := KubeClientset.CoreV1().Namespaces().List(
t.Context(),
metav1.ListOptions{
LabelSelector: TestingLabel + "=true",
FieldSelector: "status.phase=Terminating",
})
if err != nil {
return err
}
testNamespaceNames := []string{}
for _, namespace := range namespaces.Items {
if strings.HasPrefix(namespace.Name, E2ETestPrefix) {
testNamespaceNames = append(testNamespaceNames, namespace.Name)
}
}
if len(testNamespaceNames) > 0 {
args := []string{"delete", "ns"}
args = append(args, testNamespaceNames...)
_, err := Run("", "kubectl", args...)
if len(terminatingNamespaces.Items) > 0 {
err = deleteResourceWithTestFinalizer(terminatingNamespaces.Items, []schema.GroupVersionResource{
// If finalizers are added to new resource kinds, they must be added here for a proper cleanup
appsv1.SchemeGroupVersion.WithResource("deployments"),
})
if err != nil {
return err
}
@@ -762,70 +784,6 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
_, err := Run("", "kubectl", "delete", "crd", "-l", TestingLabel+"=true", "--wait=false")
return err
},
func() error {
// delete old ClusterRoles which were created by tests
clusterRoles, err := KubeClientset.RbacV1().ClusterRoles().List(
t.Context(),
metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", TestingLabel, "true"),
},
)
if err != nil {
return err
}
if len(clusterRoles.Items) > 0 {
args := []string{"delete", "clusterrole", "--wait=false"}
for _, clusterRole := range clusterRoles.Items {
args = append(args, clusterRole.Name)
}
_, err := Run("", "kubectl", args...)
if err != nil {
return err
}
}

clusterRoles, err = KubeClientset.RbacV1().ClusterRoles().List(t.Context(), metav1.ListOptions{})
if err != nil {
return err
}
testClusterRoleNames := []string{}
for _, clusterRole := range clusterRoles.Items {
if strings.HasPrefix(clusterRole.Name, E2ETestPrefix) {
testClusterRoleNames = append(testClusterRoleNames, clusterRole.Name)
}
}
if len(testClusterRoleNames) > 0 {
args := []string{"delete", "clusterrole"}
args = append(args, testClusterRoleNames...)
_, err := Run("", "kubectl", args...)
if err != nil {
return err
}
}
return nil
},
func() error {
// delete old ClusterRoleBindings which were created by tests
clusterRoleBindings, err := KubeClientset.RbacV1().ClusterRoleBindings().List(t.Context(), metav1.ListOptions{})
if err != nil {
return err
}
testClusterRoleBindingNames := []string{}
for _, clusterRoleBinding := range clusterRoleBindings.Items {
if strings.HasPrefix(clusterRoleBinding.Name, E2ETestPrefix) {
testClusterRoleBindingNames = append(testClusterRoleBindingNames, clusterRoleBinding.Name)
}
}
if len(testClusterRoleBindingNames) > 0 {
args := []string{"delete", "clusterrolebinding"}
args = append(args, testClusterRoleBindingNames...)
_, err := Run("", "kubectl", args...)
if err != nil {
return err
}
}
return nil
},
func() error {
err := updateSettingConfigMap(func(cm *corev1.ConfigMap) error {
cm.Data = map[string]string{}
@@ -893,38 +851,39 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
return err
},
func() error {
err := os.RemoveAll(TmpDir)
tmpDir := TmpDir()
err := os.RemoveAll(tmpDir)
if err != nil {
return err
}
_, err = Run("", "mkdir", "-p", TmpDir)
_, err = Run("", "mkdir", "-p", tmpDir)
if err != nil {
return err
}

// create TLS and SSH certificate directories
if IsLocal() {
_, err = Run("", "mkdir", "-p", TmpDir+"/app/config/tls")
_, err = Run("", "mkdir", "-p", tmpDir+"/app/config/tls")
if err != nil {
return err
}
_, err = Run("", "mkdir", "-p", TmpDir+"/app/config/ssh")
_, err = Run("", "mkdir", "-p", tmpDir+"/app/config/ssh")
if err != nil {
return err
}
}

// For signing during the tests
_, err = Run("", "mkdir", "-p", TmpDir+"/gpg")
_, err = Run("", "mkdir", "-p", tmpDir+"/gpg")
if err != nil {
return err
}
_, err = Run("", "chmod", "0700", TmpDir+"/gpg")
_, err = Run("", "chmod", "0700", tmpDir+"/gpg")
if err != nil {
return err
}
prevGnuPGHome := os.Getenv("GNUPGHOME")
t.Setenv("GNUPGHOME", TmpDir+"/gpg")
t.Setenv("GNUPGHOME", tmpDir+"/gpg")
//nolint:errcheck
Run("", "pkill", "-9", "gpg-agent")
_, err = Run("", "gpg", "--import", "../fixture/gpg/signingkey.asc")
@@ -935,23 +894,23 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {

// recreate GPG directories
if IsLocal() {
_, err = Run("", "mkdir", "-p", TmpDir+"/app/config/gpg/source")
_, err = Run("", "mkdir", "-p", tmpDir+"/app/config/gpg/source")
if err != nil {
return err
}
_, err = Run("", "mkdir", "-p", TmpDir+"/app/config/gpg/keys")
_, err = Run("", "mkdir", "-p", tmpDir+"/app/config/gpg/keys")
if err != nil {
return err
}
_, err = Run("", "chmod", "0700", TmpDir+"/app/config/gpg/keys")
_, err = Run("", "chmod", "0700", tmpDir+"/app/config/gpg/keys")
if err != nil {
return err
}
_, err = Run("", "mkdir", "-p", TmpDir+PluginSockFilePath)
_, err = Run("", "mkdir", "-p", tmpDir+PluginSockFilePath)
if err != nil {
return err
}
_, err = Run("", "chmod", "0700", TmpDir+PluginSockFilePath)
_, err = Run("", "chmod", "0700", tmpDir+PluginSockFilePath)
if err != nil {
return err
}
@@ -998,21 +957,12 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
return nil
},
func() error {
// random id - unique across test runs
randString, err := rand.String(5)
// create namespace for this test
_, err := Run("", "kubectl", "create", "ns", state.deploymentNamespace)
if err != nil {
return err
}
postFix := "-" + strings.ToLower(randString)
id = t.Name() + postFix
name = DnsFriendly(t.Name(), "")
deploymentNamespace = DnsFriendly("argocd-e2e-"+t.Name(), postFix)
// create namespace
_, err = Run("", "kubectl", "create", "ns", DeploymentNamespace())
if err != nil {
return err
}
_, err = Run("", "kubectl", "label", "ns", DeploymentNamespace(), TestingLabel+"=true")
_, err = Run("", "kubectl", "label", "ns", state.deploymentNamespace, TestingLabel+"=true")
return err
},
})
@@ -1020,10 +970,12 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
log.WithFields(log.Fields{
"duration": time.Since(start),
"name": t.Name(),
"id": id,
"id": state.id,
"username": "admin",
"password": "password",
}).Info("clean state")

return state
}

// RunCliWithRetry executes an Argo CD CLI command with retry logic.
@@ -1152,7 +1104,7 @@ func AddSignedFile(t *testing.T, path, contents string) {
WriteFile(t, path, contents)

prevGnuPGHome := os.Getenv("GNUPGHOME")
t.Setenv("GNUPGHOME", TmpDir+"/gpg")
t.Setenv("GNUPGHOME", TmpDir()+"/gpg")
errors.NewHandler(t).FailOnErr(Run(repoDirectory(), "git", "diff"))
errors.NewHandler(t).FailOnErr(Run(repoDirectory(), "git", "add", "."))
errors.NewHandler(t).FailOnErr(Run(repoDirectory(), "git", "-c", "user.signingkey="+GpgGoodKeyID, "commit", "-S", "-am", "add file"))
@@ -1165,7 +1117,7 @@ func AddSignedFile(t *testing.T, path, contents string) {
func AddSignedTag(t *testing.T, name string) {
t.Helper()
prevGnuPGHome := os.Getenv("GNUPGHOME")
t.Setenv("GNUPGHOME", TmpDir+"/gpg")
t.Setenv("GNUPGHOME", TmpDir()+"/gpg")
defer t.Setenv("GNUPGHOME", prevGnuPGHome)
errors.NewHandler(t).FailOnErr(Run(repoDirectory(), "git", "-c", "user.signingkey="+GpgGoodKeyID, "tag", "-sm", "add signed tag", name))
if IsRemote() {
@@ -1176,7 +1128,7 @@ func AddSignedTag(t *testing.T, name string) {
func AddTag(t *testing.T, name string) {
t.Helper()
prevGnuPGHome := os.Getenv("GNUPGHOME")
t.Setenv("GNUPGHOME", TmpDir+"/gpg")
t.Setenv("GNUPGHOME", TmpDir()+"/gpg")
defer t.Setenv("GNUPGHOME", prevGnuPGHome)
errors.NewHandler(t).FailOnErr(Run(repoDirectory(), "git", "tag", name))
if IsRemote() {
@@ -1187,7 +1139,7 @@ func AddTag(t *testing.T, name string) {
func AddTagWithForce(t *testing.T, name string) {
t.Helper()
prevGnuPGHome := os.Getenv("GNUPGHOME")
t.Setenv("GNUPGHOME", TmpDir+"/gpg")
t.Setenv("GNUPGHOME", TmpDir()+"/gpg")
defer t.Setenv("GNUPGHOME", prevGnuPGHome)
errors.NewHandler(t).FailOnErr(Run(repoDirectory(), "git", "tag", "-f", name))
if IsRemote() {
|
||||
8
test/e2e/fixture/gitconfig
Normal file
@@ -0,0 +1,8 @@
# Git configuration for e2e tests
# This file ensures reproducible test behavior by disabling system credential helpers

[credential]
helper =

[core]
askPass =
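How the harness consumes this file is not shown in the diff; one plausible wiring, stated here as an assumption rather than fact, is to point git at it through environment variables before tests shell out to git:

// Hypothetical wiring (git >= 2.32 honors GIT_CONFIG_GLOBAL); the
// fixture may instead override HOME or pass -c flags per invocation.
t.Setenv("GIT_CONFIG_GLOBAL", "../fixture/gitconfig") // the test gitconfig above
t.Setenv("GIT_CONFIG_NOSYSTEM", "1")                  // also ignore /etc/gitconfig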
@@ -23,7 +23,7 @@ func AddGPGPublicKey(t *testing.T) {
if fixture.IsLocal() {
keyData, err := os.ReadFile(keyPath)
require.NoError(t, err)
err = os.WriteFile(fmt.Sprintf("%s/app/config/gpg/source/%s", fixture.TmpDir, fixture.GpgGoodKeyID), keyData, 0o644)
err = os.WriteFile(fmt.Sprintf("%s/app/config/gpg/source/%s", fixture.TmpDir(), fixture.GpgGoodKeyID), keyData, 0o644)
require.NoError(t, err)
} else {
fixture.RestartRepoServer(t)
@@ -35,7 +35,7 @@ func DeleteGPGPublicKey(t *testing.T) {
args := []string{"gpg", "rm", fixture.GpgGoodKeyID}
errors.NewHandler(t).FailOnErr(fixture.RunCli(args...))
if fixture.IsLocal() {
require.NoError(t, os.Remove(fmt.Sprintf("%s/app/config/gpg/source/%s", fixture.TmpDir, fixture.GpgGoodKeyID)))
require.NoError(t, os.Remove(fmt.Sprintf("%s/app/config/gpg/source/%s", fixture.TmpDir(), fixture.GpgGoodKeyID)))
} else {
fixture.RestartRepoServer(t)
}
@@ -19,19 +19,19 @@ type Actions struct {
}

func (a *Actions) SetParamInNotificationConfigMap(key, value string) *Actions {
a.context.t.Helper()
require.NoError(a.context.t, fixture.SetParamInNotificationsConfigMap(key, value))
a.context.T().Helper()
require.NoError(a.context.T(), fixture.SetParamInNotificationsConfigMap(key, value))
return a
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
time.Sleep(fixture.WhenThenSleepInterval)
return &Consequences{a.context, a}
}

func (a *Actions) Healthcheck() *Actions {
a.context.t.Helper()
a.context.T().Helper()
_, err := fixture.DoHttpRequest("GET",
"/metrics",
fixture.GetNotificationServerAddress())
@@ -15,25 +15,25 @@ type Consequences struct {
}

func (c *Consequences) Services(block func(services *notification.ServiceList, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.listServices())
return c
}

func (c *Consequences) Healthy(block func(healthy bool)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.actions.healthy)
return c
}

func (c *Consequences) Triggers(block func(services *notification.TriggerList, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.listTriggers())
return c
}

func (c *Consequences) Templates(block func(services *notification.TemplateList, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.listTemplates())
return c
}
@@ -7,15 +7,23 @@ import (
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

// this implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
*fixture.TestState
}

func Given(t *testing.T) *Context {
t.Helper()
fixture.EnsureCleanState(t)
return &Context{t: t}
state := fixture.EnsureCleanState(t)
return &Context{TestState: state}
}

// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
return &Context{TestState: fixture.NewTestStateFromContext(ctx)}
}

func (c *Context) And(block func()) *Context {
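As a usage sketch of the new pattern (the test body is illustrative and assumes the fixture's usual When() chaining; it is not code from the repo):

func TestNotificationsHealthExample(t *testing.T) {
    // Given(t) now returns a Context backed by fresh per-test state, so
    // two such tests no longer race over package-level globals.
    ctx := Given(t)
    ctx.When().
        Healthcheck().
        Then().
        Healthy(func(healthy bool) {
            require.True(ctx.T(), healthy)
        })
}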
@@ -42,38 +42,38 @@ func (a *Actions) Create(args ...string) *Actions {
}

func (a *Actions) AddDestination(cluster string, namespace string) *Actions {
a.runCli("proj", "add-destination", a.context.name, cluster, namespace)
a.runCli("proj", "add-destination", a.context.GetName(), cluster, namespace)
return a
}

func (a *Actions) AddDestinationServiceAccount(cluster string, namespace string) *Actions {
a.runCli("proj", "add-destination-service-account", a.context.name, cluster, namespace)
a.runCli("proj", "add-destination-service-account", a.context.GetName(), cluster, namespace)
return a
}

func (a *Actions) AddSource(repo string) *Actions {
a.runCli("proj", "add-source", a.context.name, repo)
a.runCli("proj", "add-source", a.context.GetName(), repo)
return a
}

func (a *Actions) UpdateProject(updater func(project *v1alpha1.AppProject)) *Actions {
proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.TestNamespace()).Get(context.TODO(), a.context.name, metav1.GetOptions{})
require.NoError(a.context.t, err)
proj, err := fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.TestNamespace()).Get(context.TODO(), a.context.GetName(), metav1.GetOptions{})
require.NoError(a.context.T(), err)
updater(proj)
_, err = fixture.AppClientset.ArgoprojV1alpha1().AppProjects(fixture.TestNamespace()).Update(context.TODO(), proj, metav1.UpdateOptions{})
require.NoError(a.context.t, err)
require.NoError(a.context.T(), err)
return a
}

func (a *Actions) Name(name string) *Actions {
a.context.name = name
func (a *Actions) SetName(name string) *Actions {
a.context.SetName(name)
return a
}

func (a *Actions) prepareCreateArgs(args []string) []string {
a.context.t.Helper()
a.context.T().Helper()
args = append([]string{
"proj", "create", a.context.name,
"proj", "create", a.context.GetName(),
}, args...)

if a.context.destination != "" {
@@ -99,27 +99,27 @@ func (a *Actions) prepareCreateArgs(args []string) []string {
}

func (a *Actions) Delete() *Actions {
a.context.t.Helper()
a.runCli("proj", "delete", a.context.name)
a.context.T().Helper()
a.runCli("proj", "delete", a.context.GetName())
return a
}

func (a *Actions) And(block func()) *Actions {
a.context.t.Helper()
a.context.T().Helper()
block()
return a
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
time.Sleep(fixture.WhenThenSleepInterval)
return &Consequences{a.context, a}
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
_, a.lastError = fixture.RunCli(args...)
if !a.ignoreErrors {
require.NoError(a.context.t, a.lastError)
require.NoError(a.context.T(), a.lastError)
}
}
@@ -20,7 +20,7 @@ func (c *Consequences) Expect() *Consequences {
}

func (c *Consequences) And(block func(app *project.DetailedProjectsResponse, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.detailedProject())
return c
}
@@ -33,7 +33,7 @@ func (c *Consequences) detailedProject() (*project.DetailedProjectsResponse, err
func (c *Consequences) get() (*project.DetailedProjectsResponse, error) {
_, projectClient, _ := fixture.ArgoCDClientset.NewProjectClient()
prj, err := projectClient.GetDetailedProject(context.Background(), &project.ProjectQuery{
Name: c.context.name,
Name: c.context.GetName(),
})

return prj, err
@@ -7,10 +7,11 @@ import (
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

// this implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
name string
*fixture.TestState

destination string
destinationServiceAccounts []string
repos []string
@@ -19,21 +20,19 @@ type Context struct {

func Given(t *testing.T) *Context {
t.Helper()
fixture.EnsureCleanState(t)
return GivenWithSameState(t)
state := fixture.EnsureCleanState(t)
return GivenWithSameState(state)
}

func GivenWithSameState(t *testing.T) *Context {
t.Helper()
return &Context{t: t, name: fixture.Name()}
}

func (c *Context) GetName() string {
return c.name
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
return &Context{TestState: fixture.NewTestStateFromContext(ctx)}
}

func (c *Context) Name(name string) *Context {
c.name = name
c.SetName(name)
return c
}
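The reason GivenWithSameState now accepts a fixture.TestContext instead of a *testing.T: a second fixture package can join a test that another fixture already initialized, without re-cleaning state. A sketch of that pattern, with assumed package aliases (the same shape appears in TestHelmDependenciesPermissionDenied further down):

// One clean-state pass, two fixture contexts sharing one TestState.
projCtx := projectFixture.Given(t) // cleans state, allocates TestState
projCtx.Name("argo-helm-project-denied").
    Destination("*,*").
    When().
    Create()

appCtx := app.GivenWithSameState(projCtx) // app fixture joins, no re-clean
appCtx.Project(projCtx.GetName()).
    Path("helm-oci-with-dependencies")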
@@ -38,7 +38,7 @@ func (a *Actions) Create(args ...string) *Actions {
}

func (a *Actions) prepareCreateArgs(args []string) []string {
a.context.t.Helper()
a.context.T().Helper()
args = append([]string{
"repo", "add", a.context.path,
}, args...)
@@ -49,19 +49,19 @@ func (a *Actions) prepareCreateArgs(args []string) []string {
}

func (a *Actions) Delete() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("repo", "rm", a.context.path)
return a
}

func (a *Actions) List() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("repo", "list")
return a
}

func (a *Actions) Get() *Actions {
a.context.t.Helper()
a.context.T().Helper()
a.runCli("repo", "get", a.context.path)
return a
}
@@ -77,13 +77,13 @@ func (a *Actions) Project(project string) *Actions {
}

func (a *Actions) Then() *Consequences {
a.context.t.Helper()
a.context.T().Helper()
time.Sleep(fixture.WhenThenSleepInterval)
return &Consequences{a.context, a}
}

func (a *Actions) runCli(args ...string) {
a.context.t.Helper()
a.context.T().Helper()
a.lastOutput, a.lastError = fixture.RunCli(args...)
if !a.ignoreErrors && a.lastError != nil {
log.Fatal(a.lastOutput)
@@ -21,13 +21,13 @@ func (c *Consequences) Expect() *Consequences {
}

func (c *Consequences) And(block func(repository *v1alpha1.Repository, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.repo())
return c
}

func (c *Consequences) AndCLIOutput(block func(output string, err error)) *Consequences {
c.context.t.Helper()
c.context.T().Helper()
block(c.actions.lastOutput, c.actions.lastError)
return c
}
@@ -7,33 +7,30 @@ import (
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

// this implements the "given" part of given/when/then
// Context implements the "given" part of given/when/then.
// It embeds fixture.TestState to provide test-specific state that enables parallel test execution.
type Context struct {
t *testing.T
*fixture.TestState

path string
name string
project string
}

func Given(t *testing.T) *Context {
t.Helper()
fixture.EnsureCleanState(t)
return GivenWithSameState(t)
state := fixture.EnsureCleanState(t)
return GivenWithSameState(state)
}

// GivenWithSameState skips cleaning state. Use this when you've already ensured you have a clean
// state in your test setup and don't want to waste time by doing so again.
func GivenWithSameState(t *testing.T) *Context {
t.Helper()
return &Context{t: t, name: fixture.Name(), project: "default"}
}

func (c *Context) GetName() string {
return c.name
// GivenWithSameState creates a new Context that shares the same TestState as an existing context.
// Use this when you need multiple fixture contexts within the same test.
func GivenWithSameState(ctx fixture.TestContext) *Context {
ctx.T().Helper()
return &Context{TestState: fixture.NewTestStateFromContext(ctx), project: "default"}
}

func (c *Context) Name(name string) *Context {
c.name = name
c.SetName(name)
return c
}
@@ -320,23 +320,16 @@ func PushImageToAuthenticatedOCIRegistry(t *testing.T, pathName, tag string) {
))
}

// AddHTTPSWriteCredentials adds write credentials for an HTTPS repository.
// AddWriteCredentials adds write credentials for a repository.
// Write credentials are used by the commit-server to push hydrated manifests back to the repository.
// TODO: add CLI support for managing write credentials and use that here instead.
func AddHTTPSWriteCredentials(t *testing.T, insecure bool, repoURLType fixture.RepoURLType) {
func AddWriteCredentials(t *testing.T, name string, insecure bool, repoURLType fixture.RepoURLType) {
t.Helper()
repoURL := fixture.RepoURL(repoURLType)

// Create a Kubernetes secret with the repository-write label
// Replace invalid characters for secret name
secretName := "write-creds-" + fixture.Name()

// Delete existing secret if it exists (ignore error if not found)
_ = fixture.KubeClientset.CoreV1().Secrets(fixture.ArgoCDNamespace).Delete(
context.Background(),
secretName,
metav1.DeleteOptions{},
)
secretName := "write-creds-" + name

_, err := fixture.KubeClientset.CoreV1().Secrets(fixture.ArgoCDNamespace).Create(
context.Background(),
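The call-site shape after this rename, as a sketch (ctx is assumed to be a per-test fixture context; the explicit name argument replaces the implicit package-global fixture.Name(), so concurrent tests get distinct secret names):

// Before: repos.AddHTTPSWriteCredentials(t, true, fixture.RepoURLTypeHTTPS)
// After, with the per-test name passed in explicitly:
repos.AddWriteCredentials(t, ctx.GetName(), true, fixture.RepoURLTypeHTTPS)
// creates a secret named "write-creds-"+ctx.GetName() in the Argo CD
// namespace, labeled for repository-write use by the commit-server.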
@@ -20,6 +20,8 @@ func DnsFriendly(str string, postfix string) string { //nolint:revive //FIXME(va
str = matchFirstCap.ReplaceAllString(str, "${1}-${2}")
str = matchAllCap.ReplaceAllString(str, "${1}-${2}")
str = strings.ToLower(str)
str = strings.ReplaceAll(str, "/", "-")
str = strings.ReplaceAll(str, "_", "-")

if diff := len(str) + len(postfix) - 63; diff > 0 {
str = str[:len(str)-diff]
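The two added ReplaceAll calls matter because subtest names contain "/" and table-driven test names often contain "_", neither of which is valid in a DNS-1123 label; the 63-byte truncation just below matches the Kubernetes label-length limit. Illustrative inputs and outputs, assuming the camel-case regexes split words as the surrounding code suggests:

// DnsFriendly("TestHydratorWithHelm", "") -> "test-hydrator-with-helm"
// DnsFriendly("TestFoo/sub_case", "")     -> "test-foo-sub-case"
// Before this change the "/" and "_" would survive, producing a string
// unusable as a namespace name.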
@@ -134,7 +134,7 @@ func TestAnnotatedTagInStatusSyncRevision(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
annotatedTagIDOutput, err := fixture.Run(fixture.TmpDir+"/testdata.git", "git", "show-ref", "annotated-tag")
annotatedTagIDOutput, err := fixture.Run(fixture.TmpDir()+"/testdata.git", "git", "show-ref", "annotated-tag")
require.NoError(t, err)
require.NotEmpty(t, annotatedTagIDOutput)
// example command output:
@@ -142,7 +142,7 @@ func TestAnnotatedTagInStatusSyncRevision(t *testing.T) {
annotatedTagIDFields := strings.Fields(string(annotatedTagIDOutput))
require.Len(t, annotatedTagIDFields, 2)

targetCommitID, err := fixture.Run(fixture.TmpDir+"/testdata.git", "git", "rev-parse", "--verify", "annotated-tag^{commit}")
targetCommitID, err := fixture.Run(fixture.TmpDir()+"/testdata.git", "git", "rev-parse", "--verify", "annotated-tag^{commit}")
// example command output:
// "bcd35965e494273355265b9f0bf85075b6bc5163"
require.NoError(t, err)
@@ -157,8 +157,8 @@ func TestAnnotatedTagInStatusSyncRevision(t *testing.T) {

// Test updates to K8s resources should not trigger a self-heal when self-heal is false.
func TestAutomatedSelfHealingAgainstAnnotatedTag(t *testing.T) {
Given(t).
Path(guestbookPath).
ctx := Given(t)
ctx.Path(guestbookPath).
When().
AddAnnotatedTag("annotated-tag", "my-generic-tag-message").
// App should be auto-synced once created
@@ -178,7 +178,7 @@ func TestAutomatedSelfHealingAgainstAnnotatedTag(t *testing.T) {
And(func() {
// Deployment revisionHistoryLimit should switch to 10
timeoutErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 10*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -190,14 +190,14 @@ func TestAutomatedSelfHealingAgainstAnnotatedTag(t *testing.T) {
}).
// Update the Deployment to a different revisionHistoryLimit
And(func() {
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
"guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 9}}`), metav1.PatchOptions{}))
}).
// The revisionHistoryLimit should NOT be self-healed, because selfHealing: false. It should remain at 9.
And(func() {
// Wait up to 10 seconds to ensure that the deployment revisionHistoryLimit does NOT switch to 10; it should remain at 9.
waitErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 10*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -210,8 +210,8 @@ func TestAutomatedSelfHealingAgainstAnnotatedTag(t *testing.T) {
}

func TestAutomatedSelfHealingAgainstLightweightTag(t *testing.T) {
Given(t).
Path(guestbookPath).
ctx := Given(t)
ctx.Path(guestbookPath).
When().
AddTag("annotated-tag").
// App should be auto-synced once created
@@ -231,7 +231,7 @@ func TestAutomatedSelfHealingAgainstLightweightTag(t *testing.T) {
And(func() {
// Deployment revisionHistoryLimit should switch to 10
timeoutErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 10*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
if err != nil {
return false, nil
}
@@ -243,14 +243,14 @@ func TestAutomatedSelfHealingAgainstLightweightTag(t *testing.T) {
}).
// Update the Deployment to a different revisionHistoryLimit
And(func() {
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Patch(t.Context(),
errors.NewHandler(t).FailOnErr(fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Patch(t.Context(),
"guestbook-ui", types.MergePatchType, []byte(`{"spec": {"revisionHistoryLimit": 9}}`), metav1.PatchOptions{}))
}).
// The revisionHistoryLimit should NOT be self-healed, because selfHealing: false
And(func() {
// Wait up to 10 seconds to ensure that the deployment revisionHistoryLimit does NOT switch to 10; it should remain at 9.
waitErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 10*time.Second, true, func(context.Context) (done bool, err error) {
deployment, err := fixture.KubeClientset.AppsV1().Deployments(fixture.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
deployment, err := fixture.KubeClientset.AppsV1().Deployments(ctx.DeploymentNamespace()).Get(t.Context(), "guestbook-ui", metav1.GetOptions{})
if err != nil {
return false, nil
}

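The four self-heal checks above share one polling idiom; isolated and commented, it looks like this (a sketch: testCtx is a placeholder for the test's fixture context, and wait is k8s.io/apimachinery/pkg/util/wait as imported by these tests):

// Poll every second, up to 10s, for the deployment to reach the expected
// revisionHistoryLimit. For the "should remain at 9" assertions the test
// wants the timeout, i.e. a non-nil waitErr is the passing outcome.
waitErr := wait.PollUntilContextTimeout(t.Context(), 1*time.Second, 10*time.Second, true,
    func(c context.Context) (bool, error) {
        deployment, err := fixture.KubeClientset.AppsV1().
            Deployments(testCtx.DeploymentNamespace()).
            Get(c, "guestbook-ui", metav1.GetOptions{})
        if err != nil {
            return false, nil // transient read error: keep polling
        }
        return deployment.Spec.RevisionHistoryLimit != nil &&
            *deployment.Spec.RevisionHistoryLimit == 10, nil
    })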
@@ -21,8 +21,8 @@ import (
)

func TestHelmHooksAreCreated(t *testing.T) {
Given(t).
Path("hook").
ctx := Given(t)
ctx.Path("hook").
When().
PatchFile("hook.yaml", `[{"op": "replace", "path": "/metadata/annotations", "value": {"helm.sh/hook": "pre-install"}}]`).
CreateApp().
@@ -31,7 +31,7 @@ func TestHelmHooksAreCreated(t *testing.T) {
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(HealthIs(health.HealthStatusHealthy)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: fixture.DeploymentNamespace(), Name: "hook", Message: "pod/hook created", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, HookType: HookTypePreSync, HookPhase: OperationSucceeded, SyncPhase: SyncPhasePreSync}))
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: ctx.DeploymentNamespace(), Name: "hook", Message: "pod/hook created", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, HookType: HookTypePreSync, HookPhase: OperationSucceeded, SyncPhase: SyncPhasePreSync}))
}

// make sure we treat Helm weights as a sync wave
@@ -313,8 +313,8 @@ func TestHelmSetFile(t *testing.T) {

// ensure we can use envsubst in "set" variables
func TestHelmSetEnv(t *testing.T) {
Given(t).
Path("helm-values").
ctx := Given(t)
ctx.Path("helm-values").
When().
CreateApp().
AppSet("--helm-set", "foo=$ARGOCD_APP_NAME").
@@ -323,13 +323,13 @@ func TestHelmSetEnv(t *testing.T) {
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
assert.Equal(t, fixture.Name(), errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
assert.Equal(t, ctx.GetName(), errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
})
}

func TestHelmSetStringEnv(t *testing.T) {
Given(t).
Path("helm-values").
ctx := Given(t)
ctx.Path("helm-values").
When().
CreateApp().
AppSet("--helm-set-string", "foo=$ARGOCD_APP_NAME").
@@ -338,22 +338,22 @@ func TestHelmSetStringEnv(t *testing.T) {
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
assert.Equal(t, fixture.Name(), errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
assert.Equal(t, ctx.GetName(), errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
})
}

// make sure kube-version gets passed down to resources
func TestKubeVersion(t *testing.T) {
fixture.SkipOnEnv(t, "HELM")
Given(t).
Path("helm-kube-version").
ctx := Given(t)
ctx.Path("helm-kube-version").
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.kubeVersion}")).(string)
// Capabilities.KubeVersion defaults to 1.9.0; we assume here you are running a later version
assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.Format("v%s.%s"), kubeVersion)
@@ -365,7 +365,7 @@ func TestKubeVersion(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
assert.Equal(t, "v999.999.999", errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
assert.Equal(t, "v999.999.999", errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.kubeVersion}")).(string))
})
}
@@ -373,15 +373,15 @@ func TestKubeVersion(t *testing.T) {
// make sure api versions get passed down to resources
func TestApiVersions(t *testing.T) {
fixture.SkipOnEnv(t, "HELM")
Given(t).
Path("helm-api-versions").
ctx := Given(t)
ctx.Path("helm-api-versions").
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.apiVersions}")).(string)
// The v1 API shouldn't be going anywhere.
assert.Contains(t, apiVersions, "v1")
@@ -393,7 +393,7 @@ func TestApiVersions(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.apiVersions}")).(string)
assert.Contains(t, apiVersions, "v1/MyTestResource")
})
@@ -470,18 +470,16 @@ func TestHelmWithMultipleDependencies(t *testing.T) {
func TestHelmDependenciesPermissionDenied(t *testing.T) {
fixture.SkipOnEnv(t, "HELM")

projName := "argo-helm-project-denied"
projectFixture.
Given(t).
Name(projName).
ctx := projectFixture.Given(t)
ctx.Name("argo-helm-project-denied").
Destination("*,*").
When().
Create().
AddSource(fixture.RepoURL(fixture.RepoURLTypeFile))

expectedErr := fmt.Sprintf("helm repos localhost:5000/myrepo are not permitted in project '%s'", projName)
GivenWithSameState(t).
Project(projName).
expectedErr := fmt.Sprintf("helm repos localhost:5000/myrepo are not permitted in project '%s'", ctx.GetName())
GivenWithSameState(ctx).
Project(ctx.GetName()).
Path("helm-oci-with-dependencies").
CustomCACertAdded().
HelmHTTPSCredentialsUserPassAdded().
@@ -492,9 +490,9 @@ func TestHelmDependenciesPermissionDenied(t *testing.T) {
Then().
Expect(Error("", expectedErr))

expectedErr = fmt.Sprintf("helm repos https://localhost:9443/argo-e2e/testdata.git/helm-repo/local, https://localhost:9443/argo-e2e/testdata.git/helm-repo/local2 are not permitted in project '%s'", projName)
GivenWithSameState(t).
Project(projName).
expectedErr = fmt.Sprintf("helm repos https://localhost:9443/argo-e2e/testdata.git/helm-repo/local, https://localhost:9443/argo-e2e/testdata.git/helm-repo/local2 are not permitted in project '%s'", ctx.GetName())
GivenWithSameState(ctx).
Project(ctx.GetName()).
Path("helm-with-multiple-dependencies-permission-denied").
CustomCACertAdded().
HelmHTTPSCredentialsUserPassAdded().
@@ -37,7 +37,8 @@ func TestPostSyncHookSuccessful(t *testing.T) {
// make sure we can run a standard sync hook
func testHookSuccessful(t *testing.T, hookType HookType) {
t.Helper()
Given(t).
ctx := Given(t)
ctx.
Path("hook").
When().
PatchFile("hook.yaml", fmt.Sprintf(`[{"op": "replace", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/hook": %q}}]`, hookType)).
@@ -49,11 +50,12 @@ func testHookSuccessful(t *testing.T, hookType HookType) {
Expect(ResourceSyncStatusIs("Pod", "pod", SyncStatusCodeSynced)).
Expect(ResourceHealthIs("Pod", "pod", health.HealthStatusHealthy)).
Expect(ResourceResultNumbering(2)).
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: DeploymentNamespace(), Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Name: "hook", Message: "pod/hook created", HookType: hookType, HookPhase: OperationSucceeded, SyncPhase: SyncPhase(hookType)}))
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: ctx.DeploymentNamespace(), Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Name: "hook", Message: "pod/hook created", HookType: hookType, HookPhase: OperationSucceeded, SyncPhase: SyncPhase(hookType)}))
}

func TestPreDeleteHook(t *testing.T) {
Given(t).
ctx := Given(t)
ctx.
Path("pre-delete-hook").
When().
CreateApp().
@@ -61,7 +63,7 @@ func TestPreDeleteHook(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
_, err := KubeClientset.CoreV1().ConfigMaps(DeploymentNamespace()).Get(
_, err := KubeClientset.CoreV1().ConfigMaps(ctx.DeploymentNamespace()).Get(
t.Context(), "guestbook-ui", metav1.GetOptions{},
)
require.NoError(t, err)
@@ -115,13 +117,14 @@ func TestPostDeleteHook(t *testing.T) {

// make sure that hooks do not appear in "argocd app diff"
func TestHookDiff(t *testing.T) {
Given(t).
ctx := Given(t)
ctx.
Path("hook").
When().
CreateApp().
Then().
And(func(_ *Application) {
output, err := RunCli("app", "diff", Name())
output, err := RunCli("app", "diff", ctx.GetName())
require.Error(t, err)
assert.Contains(t, output, "name: pod")
assert.NotContains(t, output, "name: hook")
@@ -221,7 +224,8 @@ func TestPostSyncHookPodFailure(t *testing.T) {

func TestSyncFailHookPodFailure(t *testing.T) {
// Tests that a SyncFail hook will successfully run upon a pod failure (which leads to a sync failure)
Given(t).
ctx := Given(t)
ctx.
Path("hook").
When().
IgnoreErrors().
@@ -246,13 +250,14 @@ spec:
CreateApp().
Sync().
Then().
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: DeploymentNamespace(), Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Name: "sync-fail-hook", Message: "pod/sync-fail-hook created", HookType: HookTypeSyncFail, HookPhase: OperationSucceeded, SyncPhase: SyncPhaseSyncFail})).
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: ctx.DeploymentNamespace(), Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Name: "sync-fail-hook", Message: "pod/sync-fail-hook created", HookType: HookTypeSyncFail, HookPhase: OperationSucceeded, SyncPhase: SyncPhaseSyncFail})).
Expect(OperationPhaseIs(OperationFailed))
}

func TestSyncFailHookPodFailureSyncFailFailure(t *testing.T) {
// Tests that a failing SyncFail hook will successfully be marked as failed
Given(t).
ctx := Given(t)
ctx.
Path("hook").
When().
IgnoreErrors().
@@ -293,8 +298,8 @@ spec:
CreateApp().
Sync().
Then().
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: DeploymentNamespace(), Name: "successful-sync-fail-hook", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Message: "pod/successful-sync-fail-hook created", HookType: HookTypeSyncFail, HookPhase: OperationSucceeded, SyncPhase: SyncPhaseSyncFail})).
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: DeploymentNamespace(), Name: "failed-sync-fail-hook", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Message: `container "main" failed with exit code 1`, HookType: HookTypeSyncFail, HookPhase: OperationFailed, SyncPhase: SyncPhaseSyncFail})).
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: ctx.DeploymentNamespace(), Name: "successful-sync-fail-hook", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Message: "pod/successful-sync-fail-hook created", HookType: HookTypeSyncFail, HookPhase: OperationSucceeded, SyncPhase: SyncPhaseSyncFail})).
Expect(ResourceResultIs(ResourceResult{Version: "v1", Kind: "Pod", Namespace: ctx.DeploymentNamespace(), Name: "failed-sync-fail-hook", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Message: `container "main" failed with exit code 1`, HookType: HookTypeSyncFail, HookPhase: OperationFailed, SyncPhase: SyncPhaseSyncFail})).
Expect(OperationPhaseIs(OperationFailed))
}

@@ -360,7 +365,8 @@ func TestHookDeletePolicyHookFailedHookExit1(t *testing.T) {
// make sure that we can run the hook twice
func TestHookBeforeHookCreation(t *testing.T) {
var creationTimestamp1 string
Given(t).
ctx := Given(t)
ctx.
Path("hook").
When().
PatchFile("hook.yaml", `[{"op": "add", "path": "/metadata/annotations/argocd.argoproj.io~1hook-delete-policy", "value": "BeforeHookCreation"}]`).
@@ -375,7 +381,7 @@ func TestHookBeforeHookCreation(t *testing.T) {
Expect(Pod(func(p corev1.Pod) bool { return p.Name == "hook" })).
And(func(_ *Application) {
var err error
creationTimestamp1, err = getCreationTimestamp()
creationTimestamp1, err = getCreationTimestamp(ctx.DeploymentNamespace())
require.NoError(t, err)
assert.NotEmpty(t, creationTimestamp1)
// pause to ensure that timestamp will change
@@ -390,7 +396,7 @@ func TestHookBeforeHookCreation(t *testing.T) {
Expect(ResourceResultNumbering(2)).
Expect(Pod(func(p corev1.Pod) bool { return p.Name == "hook" })).
And(func(_ *Application) {
creationTimestamp2, err := getCreationTimestamp()
creationTimestamp2, err := getCreationTimestamp(ctx.DeploymentNamespace())
require.NoError(t, err)
assert.NotEmpty(t, creationTimestamp2)
assert.NotEqual(t, creationTimestamp1, creationTimestamp2)
@@ -417,8 +423,8 @@ func TestHookBeforeHookCreationFailure(t *testing.T) {
Expect(ResourceResultNumbering(2))
}

func getCreationTimestamp() (string, error) {
return Run(".", "kubectl", "-n", DeploymentNamespace(), "get", "pod", "hook", "-o", "jsonpath={.metadata.creationTimestamp}")
func getCreationTimestamp(deploymentNamespace string) (string, error) {
return Run(".", "kubectl", "-n", deploymentNamespace, "get", "pod", "hook", "-o", "jsonpath={.metadata.creationTimestamp}")
}
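With the namespace an explicit parameter, the helper no longer reads a package global, so concurrent tests can each point it at their own namespace:

// ctx is the test's fixture context from Given(t)
creationTimestamp, err := getCreationTimestamp(ctx.DeploymentNamespace())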
// make sure that we never create something annotated with Skip
@@ -486,7 +492,8 @@ func TestHookFinalizerPostSync(t *testing.T) {

func testHookFinalizer(t *testing.T, hookType HookType) {
t.Helper()
Given(t).
ctx := Given(t)
ctx.
And(func() {
require.NoError(t, SetResourceOverrides(map[string]ResourceOverride{
lua.GetConfigMapKey(schema.FromAPIVersionAndKind("batch/v1", "Job")): {
@@ -522,5 +529,5 @@ func testHookFinalizer(t *testing.T, hookType HookType) {
Expect(ResourceSyncStatusIs("Pod", "pod", SyncStatusCodeSynced)).
Expect(ResourceHealthIs("Pod", "pod", health.HealthStatusHealthy)).
Expect(ResourceResultNumbering(2)).
Expect(ResourceResultIs(ResourceResult{Group: "batch", Version: "v1", Kind: "Job", Namespace: DeploymentNamespace(), Name: "hook", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Message: "Resource has finalizer", HookType: hookType, HookPhase: OperationSucceeded, SyncPhase: SyncPhase(hookType)}))
Expect(ResourceResultIs(ResourceResult{Group: "batch", Version: "v1", Kind: "Job", Namespace: ctx.DeploymentNamespace(), Name: "hook", Images: []string{"quay.io/argoprojlabs/argocd-e2e-container:0.1"}, Message: "Resource has finalizer", HookType: hookType, HookPhase: OperationSucceeded, SyncPhase: SyncPhase(hookType)}))
}
@@ -2,14 +2,12 @@ package e2e

import (
"testing"
"time"

"github.com/stretchr/testify/require"

. "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
. "github.com/argoproj/argo-cd/v3/test/e2e/fixture/app"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture/repos"

. "github.com/argoproj/gitops-engine/pkg/sync/common"
)
@@ -138,8 +136,8 @@ func TestKustomizeVersionOverride(t *testing.T) {
}

func TestHydratorWithHelm(t *testing.T) {
Given(t).
Path("hydrator-helm").
ctx := Given(t)
ctx.Path("hydrator-helm").
When().
CreateFromFile(func(app *Application) {
app.Spec.Source = nil
@@ -170,24 +168,24 @@ func TestHydratorWithHelm(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
// Verify that the inline helm parameter was applied
output, err := fixture.Run("", "kubectl", "-n="+fixture.DeploymentNamespace(),
output, err := fixture.Run("", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", "configmap", "my-map",
"-ojsonpath={.data.message}")
require.NoError(t, err)
require.Equal(t, "helm-hydrated-with-inline-params", output)

// Verify that the namespace was passed to helm
output, err = fixture.Run("", "kubectl", "-n="+fixture.DeploymentNamespace(),
output, err = fixture.Run("", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", "configmap", "my-map",
"-ojsonpath={.data.helmns}")
require.NoError(t, err)
require.Equal(t, fixture.DeploymentNamespace(), output)
require.Equal(t, ctx.DeploymentNamespace(), output)
})
}
func TestHydratorWithKustomize(t *testing.T) {
Given(t).
Path("hydrator-kustomize").
ctx := Given(t)
ctx.Path("hydrator-kustomize").
When().
CreateFromFile(func(app *Application) {
app.Spec.Source = nil
@@ -218,15 +216,15 @@ func TestHydratorWithKustomize(t *testing.T) {
// Verify that the inline kustomize nameSuffix was applied
// kustomization.yaml has namePrefix: kustomize-, and we added nameSuffix: -inline
// So the ConfigMap name should be kustomize-my-map-inline
_, err := fixture.Run("", "kubectl", "-n="+fixture.DeploymentNamespace(),
_, err := fixture.Run("", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", "configmap", "kustomize-my-map-inline")
require.NoError(t, err)
})
}

func TestHydratorWithDirectory(t *testing.T) {
Given(t).
Path("hydrator-directory").
ctx := Given(t)
ctx.Path("hydrator-directory").
When().
CreateFromFile(func(app *Application) {
app.Spec.Source = nil
@@ -255,20 +253,16 @@ func TestHydratorWithDirectory(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
// Verify that the recurse option was applied by checking the ConfigMap from subdir
_, err := fixture.Run("", "kubectl", "-n="+fixture.DeploymentNamespace(),
_, err := fixture.Run("", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", "configmap", "my-map-subdir")
require.NoError(t, err)
})
}

func TestHydratorWithPlugin(t *testing.T) {
Given(t).
Path("hydrator-plugin").
And(func() {
go startCMPServer(t, "./testdata/hydrator-plugin")
time.Sleep(100 * time.Millisecond)
t.Setenv("ARGOCD_BINARY_NAME", "argocd")
}).
ctx := Given(t)
ctx.Path("hydrator-plugin").
RunningCMPServer("./testdata/hydrator-plugin").
When().
CreateFromFile(func(app *Application) {
app.Spec.Source = nil
@@ -299,7 +293,7 @@ func TestHydratorWithPlugin(t *testing.T) {
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
// Verify that the inline plugin env was applied
output, err := fixture.Run("", "kubectl", "-n="+fixture.DeploymentNamespace(),
output, err := fixture.Run("", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", "configmap", "plugin-generated-map",
"-ojsonpath={.data.plugin-env}")
require.NoError(t, err)
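Judging by the block it replaces, RunningCMPServer presumably bundles the plugin-server startup removed above; a sketch under that assumption, not the fixture's actual implementation:

// Hypothetical shape of the helper replacing the removed And(...) block.
func (c *Context) RunningCMPServer(pluginDir string) *Context {
    go startCMPServer(c.T(), pluginDir) // serve the config management plugin
    time.Sleep(100 * time.Millisecond)  // crude readiness wait, as before
    c.T().Setenv("ARGOCD_BINARY_NAME", "argocd")
    return c
}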
@@ -364,12 +358,10 @@ func TestHydratorWithAuthenticatedRepo(t *testing.T) {
// need to fetch existing git notes from the authenticated repository, which requires
// credentials.
Given(t).
HTTPSInsecureRepoURLAdded(true).
RepoURLType(fixture.RepoURLTypeHTTPS).
HTTPSInsecureRepoURLAdded(true).
// Add write credentials for commit-server to push hydrated manifests
And(func() {
repos.AddHTTPSWriteCredentials(t, true, fixture.RepoURLTypeHTTPS)
}).
WriteCredentials(true).
DrySourcePath("guestbook").
DrySourceRevision("HEAD").
SyncSourcePath("guestbook").

@@ -75,7 +75,8 @@ func TestJsonnetTlaParameterAppliedCorrectly(t *testing.T) {
}

func TestJsonnetTlaEnv(t *testing.T) {
Given(t).
ctx := Given(t)
ctx.
Path("jsonnet-tla-cm").
When().
CreateApp("--jsonnet-tla-str", "foo=$ARGOCD_APP_NAME", "--jsonnet-tla-code", "bar='$ARGOCD_APP_NAME'").
@@ -84,13 +85,14 @@ func TestJsonnetTlaEnv(t *testing.T) {
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
assert.Equal(t, Name(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
assert.Equal(t, Name(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.bar}")).(string))
assert.Equal(t, ctx.GetName(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
assert.Equal(t, ctx.GetName(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.bar}")).(string))
})
}

func TestJsonnetExtVarEnv(t *testing.T) {
Given(t).
ctx := Given(t)
ctx.
Path("jsonnet-ext-var").
When().
CreateApp("--jsonnet-ext-var-str", "foo=$ARGOCD_APP_NAME", "--jsonnet-ext-var-code", "bar='$ARGOCD_APP_NAME'").
@@ -99,8 +101,8 @@ func TestJsonnetExtVarEnv(t *testing.T) {
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
assert.Equal(t, Name(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
assert.Equal(t, Name(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.bar}")).(string))
assert.Equal(t, ctx.GetName(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.foo}")).(string))
assert.Equal(t, ctx.GetName(), errors.NewHandler(t).FailOnErr(Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map", "-o", "jsonpath={.data.bar}")).(string))
})
}

@@ -16,11 +16,12 @@ import (
)

func TestKustomize2AppSource(t *testing.T) {
ctx := Given(t)
patchLabelMatchesFor := func(kind string) func(app *Application) {
return func(_ *Application) {
name := "k2-patched-guestbook-ui-deploy1"
labelValue, err := fixture.Run(
"", "kubectl", "-n="+fixture.DeploymentNamespace(),
"", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", kind, name,
"-ojsonpath={.metadata.labels.patched-by}")
require.NoError(t, err)
@@ -28,7 +29,7 @@ func TestKustomize2AppSource(t *testing.T) {
}
}

Given(t).
ctx.
Path(guestbookPath).
NamePrefix("k2-").
NameSuffix("-deploy1").
@@ -181,13 +182,14 @@ func TestKustomizeImages(t *testing.T) {

// make sure we can invoke the CLI to replace replicas and the actual deployment is set to the correct value
func TestKustomizeReplicas2AppSource(t *testing.T) {
ctx := Given(t)
deploymentName := "guestbook-ui"
deploymentReplicas := 2
checkReplicasFor := func(kind string) func(app *Application) {
return func(_ *Application) {
name := deploymentName
replicas, err := fixture.Run(
"", "kubectl", "-n="+fixture.DeploymentNamespace(),
"", "kubectl", "-n="+ctx.DeploymentNamespace(),
"get", kind, name,
"-ojsonpath={.spec.replicas}")
require.NoError(t, err)
@@ -195,7 +197,7 @@ func TestKustomizeReplicas2AppSource(t *testing.T) {
}
}

Given(t).
ctx.
Path("guestbook").
When().
CreateApp().
@@ -289,8 +291,8 @@ func TestKustomizeUnsetOverrideDeployment(t *testing.T) {

// make sure kube-version gets passed down to resources
func TestKustomizeKubeVersion(t *testing.T) {
Given(t).
Path("kustomize-kube-version").
ctx := Given(t)
ctx.Path("kustomize-kube-version").
And(func() {
errors.NewHandler(t).FailOnErr(fixture.Run("", "kubectl", "patch", "cm", "argocd-cm",
"-n", fixture.TestNamespace(),
@@ -302,7 +304,7 @@ func TestKustomizeKubeVersion(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
kubeVersion := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.kubeVersion}")).(string)
// Capabilities.KubeVersion defaults to 1.9.0; we assume here you are running a later version
assert.LessOrEqual(t, fixture.GetVersions(t).ServerVersion.Format("v%s.%s"), kubeVersion)
@@ -314,15 +316,15 @@ func TestKustomizeKubeVersion(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
assert.Equal(t, "v999.999.999", errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
assert.Equal(t, "v999.999.999", errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.kubeVersion}")).(string))
})
}

// make sure api versions get passed down to resources
func TestKustomizeApiVersions(t *testing.T) {
Given(t).
Path("kustomize-api-versions").
ctx := Given(t)
ctx.Path("kustomize-api-versions").
And(func() {
errors.NewHandler(t).FailOnErr(fixture.Run("", "kubectl", "patch", "cm", "argocd-cm",
"-n", fixture.TestNamespace(),
@@ -334,7 +336,7 @@ func TestKustomizeApiVersions(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.apiVersions}")).(string)
// The v1 API shouldn't be going anywhere.
assert.Contains(t, apiVersions, "v1")
@@ -346,7 +348,7 @@ func TestKustomizeApiVersions(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", fixture.DeploymentNamespace(), "get", "cm", "my-map",
apiVersions := errors.NewHandler(t).FailOnErr(fixture.Run(".", "kubectl", "-n", ctx.DeploymentNamespace(), "get", "cm", "my-map",
"-o", "jsonpath={.data.apiVersions}")).(string)
assert.Contains(t, apiVersions, "v1/MyTestResource")
})

@@ -23,7 +23,8 @@ const managedByURLTestPath = "guestbook"
func TestManagedByURLWithAnnotation(t *testing.T) {
managedByURL := "https://argocd-instance-b.example.com"

Given(t).
ctx := Given(t)
ctx.
Project("default").
Path(managedByURLTestPath).
When().
@@ -31,7 +32,7 @@ func TestManagedByURLWithAnnotation(t *testing.T) {
And(func() {
// Add managed-by-url annotation to the application with retry logic
for i := 0; i < 3; i++ {
appObj, err := fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.ArgoCDNamespace).Get(t.Context(), fixture.Name(), metav1.GetOptions{})
appObj, err := fixture.AppClientset.ArgoprojV1alpha1().Applications(fixture.ArgoCDNamespace).Get(t.Context(), ctx.GetName(), metav1.GetOptions{})
require.NoError(t, err)

if appObj.Annotations == nil {

@@ -56,9 +56,6 @@ func TestListMatrixGenerator(t *testing.T) {
|
||||
// Create a ClusterGenerator-based ApplicationSet
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "matrix-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{values.name}}-{{path.basename}}"},
|
||||
@@ -181,9 +178,6 @@ func TestClusterMatrixGenerator(t *testing.T) {
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
CreateClusterSecret("my-secret2", "cluster2", "https://kubernetes.default.svc").
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "matrix-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-{{path.basename}}"},
|
||||
@@ -303,9 +297,6 @@ func TestMatrixTerminalMatrixGeneratorSelector(t *testing.T) {
|
||||
// Create ApplicationSet with LabelSelector on an ApplicationSetTerminalGenerator
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "matrix-generator-nested-matrix",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{values.name}}-{{path.basename}}"},
|
||||
@@ -442,9 +433,6 @@ func TestMatrixTerminalMergeGeneratorSelector(t *testing.T) {
|
||||
// Create ApplicationSet with LabelSelector on an ApplicationSetTerminalGenerator
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "matrix-generator-nested-merge",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}-{{name-suffix}}"},
|
||||
|
||||
@@ -54,9 +54,6 @@ func TestListMergeGenerator(t *testing.T) {
|
||||
// Create a ClusterGenerator-based ApplicationSet
|
||||
When().
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "merge-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}-{{name-suffix}}"},
|
||||
@@ -180,9 +177,6 @@ func TestClusterMergeGenerator(t *testing.T) {
|
||||
CreateClusterSecret("my-secret", "cluster1", "https://kubernetes.default.svc").
|
||||
CreateClusterSecret("my-secret2", "cluster2", "https://kubernetes.default.svc").
|
||||
Create(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "merge-generator",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{name}}-{{path.basename}}-{{values.name-suffix}}"},
|
||||
@@ -318,9 +312,6 @@ func TestMergeTerminalMergeGeneratorSelector(t *testing.T) {
// Create ApplicationSet with LabelSelector on an ApplicationSetTerminalGenerator
When().
Create(v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "merge-generator-nested-merge",
},
Spec: v1alpha1.ApplicationSetSpec{
Template: v1alpha1.ApplicationSetTemplate{
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{Name: "{{path.basename}}-{{name-suffix}}"},
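The merge-generator hunks follow the same pattern, except that child parameter sets are joined on one or more merge keys rather than cross-multiplied, so a key such as {{name-suffix}} comes from whichever list element matched. A hedged sketch of that shape follows; the merge key, list elements, and module path are illustrative assumptions, not from this diff.

// Illustrative merge generator: rows from the two children are joined on the
// merge key, so {{path.basename}} (Git child) and {{name-suffix}} (list child)
// land in the same parameter set. Values here are assumptions.
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)

func main() {
	merge := v1alpha1.MergeGenerator{
		MergeKeys: []string{"path.basename"}, // assumed key, not taken from this diff
		Generators: []v1alpha1.ApplicationSetNestedGenerator{
			{Git: &v1alpha1.GitGenerator{
				RepoURL:     "https://github.com/argoproj/argocd-example-apps.git",
				Revision:    "HEAD",
				Directories: []v1alpha1.GitDirectoryGeneratorItem{{Path: "*guestbook*"}},
			}},
			{List: &v1alpha1.ListGenerator{
				Elements: []apiextensionsv1.JSON{
					{Raw: []byte(`{"path.basename":"guestbook","name-suffix":"one"}`)},
				},
			}},
		},
	}
	fmt.Printf("merging %d children on %v\n", len(merge.Generators), merge.MergeKeys)
}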

@@ -27,10 +27,10 @@ func TestKubectlMetrics(t *testing.T) {
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
assert.Equal(t, fixture.Name(), app.Name)
assert.Equal(t, ctx.GetName(), app.Name)
assert.Equal(t, fixture.RepoURL(fixture.RepoURLTypeFile), app.Spec.GetSource().RepoURL)
assert.Equal(t, guestbookPath, app.Spec.GetSource().Path)
assert.Equal(t, fixture.DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, ctx.DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
}).
Expect(Event(argo.EventReasonResourceCreated, "create")).
@@ -38,7 +38,7 @@ func TestKubectlMetrics(t *testing.T) {
// app should be listed
output, err := fixture.RunCli("app", "list")
require.NoError(t, err)
assert.Contains(t, output, fixture.Name())
assert.Contains(t, output, ctx.GetName())
}).
When().
// ensure that create is idempotent
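The TestKubectlMetrics hunks show the migration that runs through this whole change set: fixture.EnsureCleanState now returns a per-test context, and test-scoped values move from package-level helpers (fixture.Name(), fixture.DeploymentNamespace()) onto that context. A minimal sketch of the migrated shape follows; only the ctx := EnsureCleanState(t) pattern and the GetName/DeploymentNamespace accessors are taken from the hunks, while the import paths and test body are illustrative.

// Sketch of the migrated fixture pattern. The fixture import path is an
// assumption and varies by Argo CD major version.
package e2e

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
)

func TestContextPatternSketch(t *testing.T) {
	// EnsureCleanState resets shared state and hands back a per-test context.
	ctx := fixture.EnsureCleanState(t)

	// Per-test values now come off the returned context rather than the
	// package-level fixture.Name()/fixture.DeploymentNamespace() helpers.
	appName := ctx.GetName()
	namespace := ctx.DeploymentNamespace()

	_, err := fixture.RunCli("app", "create", appName,
		"--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
		"--path", "guestbook",
		"--dest-server", "https://kubernetes.default.svc",
		"--dest-namespace", namespace)
	require.NoError(t, err)

	output, err := fixture.RunCli("app", "list")
	require.NoError(t, err)
	assert.Contains(t, output, appName)
}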

@@ -42,9 +42,9 @@ func assertProjHasEvent(t *testing.T, a *v1alpha1.AppProject, message string, re
}

func TestProjectCreation(t *testing.T) {
fixture.EnsureCleanState(t)
ctx := fixture.EnsureCleanState(t)

projectName := "proj-" + fixture.Name()
projectName := "proj-" + ctx.GetName()
_, err := fixture.RunCli("proj", "create", projectName,
"--description", "Test description",
"-d", "https://192.168.99.100:8443,default",
@@ -445,9 +445,9 @@ func TestRemoveOrphanedIgnore(t *testing.T) {
assertProjHasEvent(t, proj, "update", argo.EventReasonResourceUpdated)
}

func createAndConfigGlobalProject(ctx context.Context) error {
func createAndConfigGlobalProject(ctx context.Context, testName string) error {
// Create global project
projectGlobalName := "proj-g-" + fixture.Name()
projectGlobalName := "proj-g-" + testName
_, err := fixture.RunCli("proj", "create", projectGlobalName,
"--description", "Test description",
"-d", "https://192.168.99.100:8443,default",
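The signature change above is the parallel-safe version of the same idea: rather than reading a package-level name inside the helper, callers thread the test name through explicitly. A self-contained sketch of the pattern follows; only the signature and the "proj-g-" prefix come from the hunk, and the body is a stand-in.

// Sketch of the changed helper: the test name is now an explicit parameter,
// so concurrent tests cannot race on shared fixture state. The real helper
// runs `argocd proj create` and related CLI calls; this body is a stand-in.
package main

import (
	"context"
	"fmt"
)

func createAndConfigGlobalProject(ctx context.Context, testName string) error {
	if err := ctx.Err(); err != nil {
		return err // honor cancellation, as a CLI/API call would
	}
	projectGlobalName := "proj-g-" + testName
	fmt.Println("would create global project:", projectGlobalName)
	return nil
}

func main() {
	if err := createAndConfigGlobalProject(context.Background(), "example-test"); err != nil {
		fmt.Println("error:", err)
	}
}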
@@ -512,12 +512,12 @@ func createAndConfigGlobalProject(ctx context.Context) error {
}

func TestGetVirtualProjectNoMatch(t *testing.T) {
fixture.EnsureCleanState(t)
err := createAndConfigGlobalProject(t.Context())
ctx := fixture.EnsureCleanState(t)
err := createAndConfigGlobalProject(t.Context(), ctx.GetName())
require.NoError(t, err)

// Create project which does not match global project settings
projectName := "proj-" + fixture.Name()
projectName := "proj-" + ctx.GetName()
_, err = fixture.RunCli("proj", "create", projectName,
"--description", "Test description",
"-d", v1alpha1.KubernetesInternalAPIServerAddr+",*",
@@ -529,27 +529,27 @@ func TestGetVirtualProjectNoMatch(t *testing.T) {
require.NoError(t, err)

// Create an app belongs to proj project
_, err = fixture.RunCli("app", "create", fixture.Name(), "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", proj.Name, "--dest-server", v1alpha1.KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
_, err = fixture.RunCli("app", "create", ctx.GetName(), "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", proj.Name, "--dest-server", v1alpha1.KubernetesInternalAPIServerAddr, "--dest-namespace", ctx.DeploymentNamespace())
require.NoError(t, err)

// App trying to sync a resource which is not blacked listed anywhere
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", "apps:Deployment:guestbook-ui", "--timeout", strconv.Itoa(10))
_, err = fixture.RunCli("app", "sync", ctx.GetName(), "--resource", "apps:Deployment:guestbook-ui", "--timeout", strconv.Itoa(10))
require.NoError(t, err)

// app trying to sync a resource which is black listed by global project
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", ":Service:guestbook-ui", "--timeout", strconv.Itoa(10))
_, err = fixture.RunCli("app", "sync", ctx.GetName(), "--resource", ":Service:guestbook-ui", "--timeout", strconv.Itoa(10))
require.NoError(t, err)
}

func TestGetVirtualProjectMatch(t *testing.T) {
fixture.EnsureCleanState(t)
testCtx := fixture.EnsureCleanState(t)
ctx := t.Context()
err := createAndConfigGlobalProject(ctx)
err := createAndConfigGlobalProject(ctx, testCtx.GetName())
require.NoError(t, err)

// Create project which matches global project settings
projectName := "proj-" + fixture.Name()
projectName := "proj-" + testCtx.GetName()
_, err = fixture.RunCli("proj", "create", projectName,
"--description", "Test description",
"-d", v1alpha1.KubernetesInternalAPIServerAddr+",*",
@@ -566,16 +566,16 @@ func TestGetVirtualProjectMatch(t *testing.T) {
require.NoError(t, err)

// Create an app belongs to proj project
_, err = fixture.RunCli("app", "create", fixture.Name(), "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", proj.Name, "--dest-server", v1alpha1.KubernetesInternalAPIServerAddr, "--dest-namespace", fixture.DeploymentNamespace())
_, err = fixture.RunCli("app", "create", testCtx.GetName(), "--repo", fixture.RepoURL(fixture.RepoURLTypeFile),
"--path", guestbookPath, "--project", proj.Name, "--dest-server", v1alpha1.KubernetesInternalAPIServerAddr, "--dest-namespace", testCtx.DeploymentNamespace())
require.NoError(t, err)

// App trying to sync a resource which is not blacked listed anywhere
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", "apps:Deployment:guestbook-ui", "--timeout", strconv.Itoa(10))
_, err = fixture.RunCli("app", "sync", testCtx.GetName(), "--resource", "apps:Deployment:guestbook-ui", "--timeout", strconv.Itoa(10))
require.ErrorContains(t, err, "blocked by sync window")

// app trying to sync a resource which is black listed by global project
_, err = fixture.RunCli("app", "sync", fixture.Name(), "--resource", ":Service:guestbook-ui", "--timeout", strconv.Itoa(10))
_, err = fixture.RunCli("app", "sync", testCtx.GetName(), "--resource", ":Service:guestbook-ui", "--timeout", strconv.Itoa(10))
assert.ErrorContains(t, err, "blocked by sync window")
}
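One detail worth noting in the sync calls above: the --resource flag takes a GROUP:KIND:NAME value, and a leading colon (as in :Service:guestbook-ui) selects the core API group. A tiny sketch of that format follows; the helper name is illustrative, not part of the Argo CD API.

// resourceSelector builds the GROUP:KIND:NAME value that `argocd app sync
// --resource` expects; an empty group yields the leading-colon form used for
// core-group kinds such as Service.
package main

import "fmt"

func resourceSelector(group, kind, name string) string {
	return fmt.Sprintf("%s:%s:%s", group, kind, name)
}

func main() {
	fmt.Println(resourceSelector("apps", "Deployment", "guestbook-ui")) // apps:Deployment:guestbook-ui
	fmt.Println(resourceSelector("", "Service", "guestbook-ui"))        // :Service:guestbook-ui
}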
Some files were not shown because too many files have changed in this diff.