Compare commits


1 Commit

Author:  argoproj-renovate[bot]
SHA1:    e32afa8dfb
Message: chore(deps): update dependency pygments to v2.20.0 [security]
         Signed-off-by: renovate[bot] <renovate[bot]@users.noreply.github.com>
Date:    2026-03-29 14:11:20 +00:00
83 changed files with 432 additions and 2185 deletions

View File

@@ -37,7 +37,7 @@ jobs:
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Add ~/go/bin to PATH

View File

@@ -32,7 +32,7 @@ jobs:
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@fee1f7d63c2ff003460e3d139729b119787bc349 # v2.2.2
uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1
with:
app-id: ${{ secrets.CHERRYPICK_APP_ID }}
private-key: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}

View File

@@ -57,7 +57,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -77,7 +77,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build and module cache
@@ -108,7 +108,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
@@ -136,7 +136,7 @@ jobs:
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -204,7 +204,7 @@ jobs:
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -264,7 +264,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -404,7 +404,7 @@ jobs:
- name: Upload code coverage information to codecov.io
# Only run when the workflow is for upstream (PR target or push is in argoproj/argo-cd).
if: github.repository == 'argoproj/argo-cd'
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5.5.3
with:
files: test-results/full-coverage.out
fail_ci_if_error: true
@@ -413,7 +413,7 @@ jobs:
- name: Upload test results to Codecov
# Codecov uploads test results to Codecov.io on upstream master branch.
if: github.repository == 'argoproj/argo-cd' && github.ref == 'refs/heads/master' && github.event_name == 'push'
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6.0.0
uses: codecov/codecov-action@1af58845a975a7985b0beb0cbe6fbbb71a41dbad # v5.5.3
with:
files: test-results/junit.xml
fail_ci_if_error: true
@@ -423,7 +423,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
uses: SonarSource/sonarqube-scan-action@299e4b793aaa83bf2aba7c9c14bedbb485688ec4 # v7.1.0
uses: SonarSource/sonarqube-scan-action@a31c9398be7ace6bbfaf30c0bd5d415f843d45e9 # v7.0.0
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
@@ -466,7 +466,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Set GOPATH
@@ -510,7 +510,7 @@ jobs:
go mod download
- name: Install goreman
run: |
go install github.com/mattn/goreman@v0.3.17
go install github.com/mattn/goreman@latest
- name: Install all tools required for building & testing
run: |
make install-test-tools-local

View File

@@ -44,7 +44,7 @@ jobs:
# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version-file: go.mod

View File

@@ -67,26 +67,16 @@ jobs:
if: ${{ github.ref_type != 'tag'}}
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ inputs.go-version }}
cache: false
- name: Install cosign
uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
uses: sigstore/cosign-installer@ba7bc0a3fef59531c69a25acd34668d6d3fe6f22 # v4.1.0
- name: Setup QEMU
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
with:
image: tonistiigi/binfmt@sha256:d3b963f787999e6c0219a48dba02978769286ff61a5f4d26245cb6a6e5567ea3 #qemu-v10.0.4
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
with:
# buildkit v0.28.1
driver-opts: |
image=moby/buildkit@sha256:a82d1ab899cda51aade6fe818d71e4b58c4079e047a0cf29dbb93b2b0465ea69
- uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0
- uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Setup tags for container image as a CSV type
run: |

View File

@@ -133,7 +133,7 @@ jobs:
run: git fetch --force --tags
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
cache: false
@@ -162,7 +162,7 @@ jobs:
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0
id: run-goreleaser
with:
version: v2.14.3
version: latest
args: release --clean --timeout 55m
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -219,7 +219,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Golang
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0
with:
go-version: ${{ env.GOLANG_VERSION }}
cache: false

View File

@@ -27,7 +27,7 @@ jobs:
# has been updated (see it's numeric version in action.yaml)
# and update `renovate-version` parameter accordingly
- name: Self-hosted Renovate
uses: renovatebot/github-action@3633cede7d4d4598438e654eac4a695e46004420 #46.1.7
uses: renovatebot/github-action@68a3ea99af6ad249940b5a9fdf44fc6d7f14378b #46.1.6
with:
configurationFile: .github/configs/renovate-config.js
token: '${{ steps.get_token.outputs.token }}'

View File

@@ -487,7 +487,7 @@ test-e2e:
test-e2e-local: cli-local
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
export GO111MODULE=off
ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS=$${ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS:-true} DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
# Spawns a shell in the test server container for debugging purposes
debug-test-server: test-tools-image

View File

@@ -10,5 +10,5 @@ git-server: test/fixture/testrepos/start-git.sh
helm-registry: test/fixture/testrepos/start-helm-registry.sh
oci-registry: test/fixture/testrepos/start-authenticated-helm-registry.sh
dev-mounter: [ "$ARGOCD_E2E_TEST" != "true" ] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS=${ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS:-true} $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"

View File

@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"

View File

@@ -8,7 +8,7 @@ import (
"strings"
"text/tabwriter"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
"github.com/argoproj/argo-cd/v3/util/templates"

View File

@@ -40,10 +40,6 @@ var appSetExample = templates.Examples(`
# Delete an ApplicationSet
argocd appset delete APPSETNAME (APPSETNAME...)
# Namespace precedence for --appset-namespace (-N):
# - get/delete: if the argument is namespace/name, that namespace wins; -N is ignored.
# - create/generate: metadata.namespace in the YAML wins when set; -N applies only when the manifest omits namespace.
`)
// NewAppSetCommand returns a new instance of an `argocd appset` command
@@ -68,9 +64,8 @@ func NewAppSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
// NewApplicationSetGetCommand returns a new instance of an `argocd appset get` command
func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
output string
showParams bool
appSetNamespace string
output string
showParams bool
)
command := &cobra.Command{
Use: "get APPSETNAME",
@@ -78,13 +73,6 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
Example: templates.Examples(`
# Get ApplicationSets
argocd appset get APPSETNAME
# Get ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset get APPSET_NAMESPACE/APPSETNAME
# Get ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset get --appset-namespace=APPSET_NAMESPACE APPSETNAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -97,7 +85,7 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
conn, appIf := acdClient.NewApplicationSetClientOrDie()
defer utilio.Close(conn)
appSetName, appSetNs := argo.ParseFromQualifiedName(args[0], appSetNamespace)
appSetName, appSetNs := argo.ParseFromQualifiedName(args[0], "")
appSet, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appSetName, AppsetNamespace: appSetNs})
errors.CheckError(err)
@@ -125,7 +113,6 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().BoolVar(&showParams, "show-params", false, "Show ApplicationSet parameters and overrides")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Only get ApplicationSet from a namespace (ignored when qualified name is provided)")
return command
}
@@ -134,7 +121,6 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
var (
output string
upsert, dryRun, wait bool
appSetNamespace string
)
command := &cobra.Command{
Use: "create",
@@ -143,9 +129,6 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
# Create ApplicationSets
argocd appset create <filename or URL> (<filename or URL>...)
# Create ApplicationSet in a specific namespace using
argocd appset create --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
# Dry-run AppSet creation to see what applications would be managed
argocd appset create --dry-run <filename or URL> -o json | jq -r '.status.resources[].name'
`),
@@ -174,11 +157,6 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
conn, appIf := argocdClient.NewApplicationSetClientOrDie()
defer utilio.Close(conn)
if appset.Namespace == "" && appSetNamespace != "" {
fmt.Printf("ApplicationSet YAML file does not have namespace; using --appset-namespace=%q.\n", appSetNamespace)
appset.Namespace = appSetNamespace
}
// Get app before creating to see if it is being updated or no change
existing, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appset.Name, AppsetNamespace: appset.Namespace})
if grpc.UnwrapGRPCStatus(err).Code() != codes.NotFound {
@@ -240,23 +218,18 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
command.Flags().BoolVar(&dryRun, "dry-run", false, "Allows to evaluate the ApplicationSet template on the server to get a preview of the applications that would be created")
command.Flags().BoolVar(&wait, "wait", false, "Wait until the ApplicationSet's resources are up to date. Will block indefinitely if the ApplicationSet has errors")
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Namespace where the ApplicationSet will be created in (ignored when provided YAML file has namespace set in metadata)")
return command
}
// NewApplicationSetGenerateCommand returns a new instance of an `argocd appset generate` command
func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var output string
var appSetNamespace string
command := &cobra.Command{
Use: "generate",
Short: "Generate apps of ApplicationSet rendered templates",
Example: templates.Examples(`
# Generate apps of ApplicationSet rendered templates
argocd appset generate <filename or URL> (<filename or URL>...)
# Generate apps of ApplicationSet rendered templates in a specific namespace
argocd appset generate --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -279,11 +252,6 @@ func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *c
errors.Fatal(errors.ErrorGeneric, fmt.Sprintf("Error generating apps for ApplicationSet %s. ApplicationSet does not have Name field set", appset))
}
if appset.Namespace == "" && appSetNamespace != "" {
fmt.Printf("ApplicationSet YAML file does not have namespace; using --appset-namespace=%q.\n", appSetNamespace)
appset.Namespace = appSetNamespace
}
conn, appIf := argocdClient.NewApplicationSetClientOrDie()
defer utilio.Close(conn)
@@ -318,7 +286,6 @@ func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *c
},
}
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Namespace used for generating Applications (ignored when provided YAML file has namespace set in metadata)")
return command
}
@@ -371,9 +338,8 @@ func NewApplicationSetListCommand(clientOpts *argocdclient.ClientOptions) *cobra
// NewApplicationSetDeleteCommand returns a new instance of an `argocd appset delete` command
func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
noPrompt bool
wait bool
appSetNamespace string
noPrompt bool
wait bool
)
command := &cobra.Command{
Use: "delete",
@@ -381,12 +347,6 @@ func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob
Example: templates.Examples(`
# Delete an applicationset
argocd appset delete APPSETNAME (APPSETNAME...)
# Delete ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset delete APPSET_NAMESPACE/APPSETNAME
# Delete ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset delete --appset-namespace=APPSET_NAMESPACE APPSETNAME
`),
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
@@ -415,7 +375,7 @@ func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob
promptUtil := utils.NewPrompt(isTerminal && !noPrompt)
for _, appSetQualifiedName := range args {
appSetName, appSetNs := argo.ParseFromQualifiedName(appSetQualifiedName, appSetNamespace)
appSetName, appSetNs := argo.ParseFromQualifiedName(appSetQualifiedName, "")
appsetDeleteReq := applicationset.ApplicationSetDeleteRequest{
Name: appSetName,
@@ -452,7 +412,6 @@ func NewApplicationSetDeleteCommand(clientOpts *argocdclient.ClientOptions) *cob
}
command.Flags().BoolVarP(&noPrompt, "yes", "y", false, "Turn off prompting to confirm cascaded deletion of Application resources")
command.Flags().BoolVar(&wait, "wait", false, "Wait until deletion of the applicationset(s) completes")
command.Flags().StringVarP(&appSetNamespace, "appset-namespace", "N", "", "Namespace where the ApplicationSet will be deleted from (ignored when qualified name is provided)")
return command
}

View File

@@ -10,7 +10,7 @@ import (
"github.com/Masterminds/sprig/v3"
log "github.com/sirupsen/logrus"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
@@ -102,6 +102,9 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
}
}
// if no manifest changes then skip commit
if !atleastOneManifestChanged {
return false, nil
}
return atleastOneManifestChanged, nil
}
@@ -137,13 +140,11 @@ func writeReadme(root *os.Root, dirPath string, metadata hydrator.HydratorCommit
if err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create README file: %w", err)
}
defer func() {
err := readmeFile.Close()
if err != nil {
log.WithError(err).Error("failed to close README file")
}
}()
err = readmeTemplate.Execute(readmeFile, metadata)
closeErr := readmeFile.Close()
if closeErr != nil {
log.WithError(closeErr).Error("failed to close README file")
}
if err != nil {
return fmt.Errorf("failed to execute readme template: %w", err)
}

View File

@@ -2690,7 +2690,7 @@ func (ctrl *ApplicationController) applyImpersonationConfig(config *rest.Config,
if !impersonationEnabled {
return nil
}
user, err := settings_util.DeriveServiceAccountToImpersonate(proj, app, destCluster)
user, err := deriveServiceAccountToImpersonate(proj, app, destCluster)
if err != nil {
return fmt.Errorf("error deriving service account to impersonate: %w", err)
}

View File

@@ -132,11 +132,11 @@ func (c *clusterInfoUpdater) getUpdatedClusterInfo(ctx context.Context, apps []*
continue
}
}
destServer, err := argo.GetDestinationServer(ctx, a.Spec.Destination, c.db)
destCluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, c.db)
if err != nil {
continue
}
if destServer == cluster.Server {
if destCluster.Server == cluster.Server {
appCount++
}
}

View File

@@ -101,121 +101,6 @@ func TestClusterSecretUpdater(t *testing.T) {
}
}
func TestGetUpdatedClusterInfo_AppCount(t *testing.T) {
const fakeNamespace = "fake-ns"
const clusterServer = "https://prod.example.com"
const clusterName = "prod"
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string]string{},
}
argoCDSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string][]byte{"admin.password": nil, "server.secretkey": nil},
}
clusterSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "prod-cluster",
Namespace: fakeNamespace,
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte(clusterName),
"server": []byte(clusterServer),
"config": []byte("{}"),
},
}
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecret, clusterSecret)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
argoDB := db.NewDB(fakeNamespace, settingsManager, kubeclientset)
apps := []*v1alpha1.Application{
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Name: clusterName}}},
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Server: clusterServer}}},
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Server: "https://other.example.com"}}},
}
updater := &clusterInfoUpdater{db: argoDB, namespace: fakeNamespace}
cluster := v1alpha1.Cluster{Server: clusterServer}
info := updater.getUpdatedClusterInfo(t.Context(), apps, cluster, nil, metav1.Now())
assert.Equal(t, int64(2), info.ApplicationsCount)
}
func TestGetUpdatedClusterInfo_AmbiguousName(t *testing.T) {
const fakeNamespace = "fake-ns"
const clusterServer = "https://prod.example.com"
const clusterName = "prod"
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string]string{},
}
argoCDSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string][]byte{"admin.password": nil, "server.secretkey": nil},
}
makeClusterSecret := func(secretName, server string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: fakeNamespace,
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte(clusterName),
"server": []byte(server),
"config": []byte("{}"),
},
}
}
// Two secrets share the same cluster name
kubeclientset := fake.NewClientset(
emptyArgoCDConfigMap, argoCDSecret,
makeClusterSecret("prod-cluster-1", clusterServer),
makeClusterSecret("prod-cluster-2", "https://prod2.example.com"),
)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
argoDB := db.NewDB(fakeNamespace, settingsManager, kubeclientset)
apps := []*v1alpha1.Application{
{Spec: v1alpha1.ApplicationSpec{Destination: v1alpha1.ApplicationDestination{Name: clusterName}}},
}
updater := &clusterInfoUpdater{db: argoDB, namespace: fakeNamespace}
cluster := v1alpha1.Cluster{Server: clusterServer}
info := updater.getUpdatedClusterInfo(t.Context(), apps, cluster, nil, metav1.Now())
assert.Equal(t, int64(0), info.ApplicationsCount, "ambiguous name should not count app")
}
func TestUpdateClusterLabels(t *testing.T) {
shouldNotBeInvoked := func(_ context.Context, _ *v1alpha1.Cluster) (*v1alpha1.Cluster, error) {
shouldNotHappen := errors.New("if an error happens here, something's wrong")

View File

@@ -847,10 +847,11 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
if err != nil {
log.Errorf("CompareAppState error getting server side diff dry run applier: %s", err)
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
} else {
defer cleanup()
diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(applier))
}
if cleanup != nil {
defer cleanup()
}
diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(applier))
}
// enable structured merge diff if application syncs with server-side apply

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"os"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -32,16 +33,20 @@ import (
applog "github.com/argoproj/argo-cd/v3/util/app/log"
"github.com/argoproj/argo-cd/v3/util/argo"
"github.com/argoproj/argo-cd/v3/util/argo/diff"
"github.com/argoproj/argo-cd/v3/util/glob"
kubeutil "github.com/argoproj/argo-cd/v3/util/kube"
logutils "github.com/argoproj/argo-cd/v3/util/log"
"github.com/argoproj/argo-cd/v3/util/lua"
"github.com/argoproj/argo-cd/v3/util/settings"
)
const (
// EnvVarSyncWaveDelay is an environment variable which controls the delay in seconds between
// each sync-wave
EnvVarSyncWaveDelay = "ARGOCD_SYNC_WAVE_DELAY"
// serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
// in a DefaultServiceAccount configured for a DestinationServiceAccount
serviceAccountDisallowedCharSet = "!*[]{}\\/"
)
func (m *appStateManager) getOpenAPISchema(server *v1alpha1.Cluster) (openapi.Resources, error) {
@@ -283,7 +288,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alp
return
}
if impersonationEnabled {
serviceAccountToImpersonate, err := settings.DeriveServiceAccountToImpersonate(project, app, destCluster)
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(project, app, destCluster)
if err != nil {
state.Phase = common.OperationError
state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
@@ -553,6 +558,41 @@ func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject
return !canSync, nil
}
// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application, destCluster *v1alpha1.Cluster) (string, error) {
// spec.Destination.Namespace is optional. If not specified, use the Application's
// namespace
serviceAccountNamespace := application.Spec.Destination.Namespace
if serviceAccountNamespace == "" {
serviceAccountNamespace = application.Namespace
}
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
// if so, return the service account specified for that destination.
for _, item := range project.Spec.DestinationServiceAccounts {
dstServerMatched, err := glob.MatchWithError(item.Server, destCluster.Server)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
}
dstNamespaceMatched, err := glob.MatchWithError(item.Namespace, application.Spec.Destination.Namespace)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
}
if dstServerMatched && dstNamespaceMatched {
if strings.Trim(item.DefaultServiceAccount, " ") == "" || strings.ContainsAny(item.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
return "", fmt.Errorf("default service account contains invalid chars '%s'", item.DefaultServiceAccount)
} else if strings.Contains(item.DefaultServiceAccount, ":") {
// service account is specified along with its namespace.
return "system:serviceaccount:" + item.DefaultServiceAccount, nil
}
// service account needs to be prefixed with a namespace
return fmt.Sprintf("system:serviceaccount:%s:%s", serviceAccountNamespace, item.DefaultServiceAccount), nil
}
}
// if there is no match found in the AppProject.Spec.DestinationServiceAccounts, use the default service account of the destination namespace.
return "", fmt.Errorf("no matching service account found for destination server %s and namespace %s", application.Spec.Destination.Server, serviceAccountNamespace)
}
// validateSyncPermissions checks whether the given resource is permitted by the project's
// allow/deny lists and destination rules. It returns an error if the API resource info is nil
// (preventing a nil-pointer panic), if the resource's group/kind is not permitted, or if

View File

@@ -22,7 +22,6 @@ import (
"github.com/argoproj/argo-cd/v3/test"
"github.com/argoproj/argo-cd/v3/util/argo/diff"
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
"github.com/argoproj/argo-cd/v3/util/settings"
)
func TestPersistRevisionHistory(t *testing.T) {
@@ -727,7 +726,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should be an error saying no valid match was found
@@ -751,7 +750,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
@@ -790,7 +789,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be no error and should use the right service account for impersonation
require.NoError(t, err)
@@ -829,7 +828,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be no error and it should use the first matching service account for impersonation
require.NoError(t, err)
@@ -863,7 +862,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
require.NoError(t, err)
@@ -898,7 +897,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should be an error saying no match was found
require.EqualError(t, err, expectedErrMsg)
@@ -926,7 +925,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the service account configured for with empty namespace should be used.
require.NoError(t, err)
@@ -960,7 +959,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the catch all service account should be returned
require.NoError(t, err)
@@ -984,7 +983,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
@@ -1018,7 +1017,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
@@ -1046,7 +1045,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
f.application.Spec.Destination.Name = f.cluster.Name
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.
@@ -1129,7 +1128,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the right service account must be returned.
require.NoError(t, err)
@@ -1168,7 +1167,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and first matching service account should be used
require.NoError(t, err)
@@ -1202,7 +1201,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account of the glob pattern, being the first match should be returned.
@@ -1237,7 +1236,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
// then, there an error with appropriate message must be returned
require.EqualError(t, err, expectedErr)
@@ -1271,7 +1270,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there should not be any error and the service account of the glob pattern match must be returned.
require.NoError(t, err)
@@ -1295,7 +1294,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
// then, there must be an error as the glob pattern is invalid.
require.ErrorContains(t, err, "invalid glob pattern for destination server")
@@ -1329,7 +1328,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
// then, there should not be any error and the service account with the given namespace prefix must be returned.
require.NoError(t, err)
@@ -1357,7 +1356,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
f.application.Spec.Destination.Name = f.cluster.Name
// when
sa, err := settings.DeriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
assert.Equal(t, expectedSA, sa)
// then, there should not be any error and the service account with its namespace should be returned.

Binary image changed (file contents not shown): size reduced from 23 MiB to 3.0 MiB.

View File

@@ -30,7 +30,7 @@ Impersonation requests first authenticate as the requesting user, then switch to
### Feature scope
Impersonation is supported for the lifecycle of objects managed by an Application directly, which includes sync operations (creation, update and pruning of resources) and deletion as part of Application finalizer logic. It is also supported for UI operations triggered by the user.
Impersonation is currently only supported for the lifecycle of objects managed by an Application directly, which includes sync operations (creation, update and pruning of resources) and deletion as part of Application finalizer logic. This *does not* includes operations triggered via ArgoCD's UI, which will still be executed with Argo CD's control-plane service account.
## Prerequisites
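For context on the impersonation scope described above, here is a minimal sketch of the AppProject configuration that drives service-account selection; it mirrors the `Server`, `Namespace`, and `DefaultServiceAccount` fields consumed by `deriveServiceAccountToImpersonate` earlier in this diff, while the project and namespace names are illustrative.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: my-project              # hypothetical project name
  namespace: argocd
spec:
  destinationServiceAccounts:
    # Matched against the destination server URL and namespace (glob patterns allowed),
    # as in the derivation logic shown above.
    - server: https://kubernetes.default.svc
      namespace: guestbook                        # hypothetical destination namespace
      defaultServiceAccount: guestbook-deployer   # service account used for impersonated syncs
```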

View File

@@ -14,7 +14,7 @@ The Progressive Syncs feature set is intended to be light and flexible. The feat
- Progressive Syncs watch for the managed Application resources to become "Healthy" before proceeding to the next stage.
- Deployments, DaemonSets, StatefulSets, and [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) are all supported, because the Application enters a "Progressing" state while pods are being rolled out. In fact, any resource with a health check that can report a "Progressing" status is supported.
- [Argo CD Resource Hooks](../../user-guide/sync-waves.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
- [Argo CD Resource Hooks](../../user-guide/resource_hooks.md) are supported. We recommend this approach for users that need advanced functionality when an Argo Rollout cannot be used, such as smoke testing after a DaemonSet change.
## Enabling Progressive Syncs
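As a companion to the Progressive Syncs notes above, a hedged sketch of an ApplicationSet using the RollingSync strategy; the generator, labels, repository, and `maxUpdate` value are placeholders, not part of this change.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook                 # hypothetical name
spec:
  generators:
    - list:
        elements:
          - env: staging          # placeholder environments
          - env: prod
  strategy:
    type: RollingSync
    rollingSync:
      steps:
        # Each step waits for its Applications to report Healthy before the next
        # step starts, as described above.
        - matchExpressions:
            - key: env
              operator: In
              values: [staging]
        - matchExpressions:
            - key: env
              operator: In
              values: [prod]
          maxUpdate: 25%          # limit how many matching apps update at once
  template:
    metadata:
      name: 'guestbook-{{env}}'
      labels:
        env: '{{env}}'            # labels are what the steps above match on
    spec:
      project: default
      source:
        repoURL: https://example.com/repo.git   # placeholder repository
        path: guestbook
      destination:
        server: https://kubernetes.default.svc
        namespace: guestbook
```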

View File

@@ -1,40 +0,0 @@
# v3.4 to 3.5
## Breaking Changes
## Behavioral Improvements / Fixes
### Impersonation extended to server operations
When [impersonation](../app-sync-using-impersonation.md) is enabled, it now applies to all API server operations, not just sync operations. This means that actions triggered through the UI or API (viewing logs, listing events, deleting resources, running resource actions, etc.) will use the impersonated service account derived from the AppProject's `destinationServiceAccounts` configuration.
Previously, impersonation only applied to sync operations.
**Affected operations and required permissions:**
| Operation | Kubernetes API call | Required RBAC verbs |
|---|---|---|
| Get resource | `GET` on the target resource | `get` |
| Patch resource | `PATCH` on the target resource | `get`, `patch` |
| Delete resource | `DELETE` on the target resource | `delete` |
| List resource events | `LIST` on `events` (core/v1) | `list` |
| View pod logs | `GET` on `pods` and `pods/log` | `get` |
| Run resource action | `GET`, `CREATE`, `PATCH` on the target resource | `get`, `create`, `patch` |
This list covers built-in operations. Custom resource actions may require additional permissions depending on what Kubernetes API calls they make.
Users with impersonation enabled must ensure the service accounts configured in `destinationServiceAccounts` have permissions for these operations.
No action is required for users who do not have impersonation enabled.
## API Changes
## Security Changes
## Deprecated Items
## Kustomize Upgraded
## Helm Upgraded
## Custom Healthchecks Added
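A hedged sketch of a namespaced Role covering the verbs listed in the operations table above for an impersonated service account; the resource scoping and names are illustrative and should be tightened for real use.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argocd-impersonation     # hypothetical Role name
  namespace: guestbook           # hypothetical destination namespace
rules:
  # get/create/patch/delete on managed resources (get, patch, delete, resource actions)
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["get", "create", "patch", "delete"]
  # list resource events, per the table above
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list"]
  # read pod logs, per the table above
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["get"]
```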

View File

@@ -39,7 +39,6 @@ kubectl apply -n argocd --server-side --force-conflicts -f https://raw.githubuse
<hr/>
- [v3.4 to v3.5](./3.4-3.5.md)
- [v3.3 to v3.4](./3.3-3.4.md)
- [v3.2 to v3.3](./3.2-3.3.md)
- [v3.1 to v3.2](./3.1-3.2.md)

View File

@@ -1,21 +1,21 @@
# Keycloak
Keycloak and Argo CD integration can be configured in two ways with Client authentication and with PKCE.
Keycloak and ArgoCD integration can be configured in two ways with Client authentication and with PKCE.
If you need to authenticate with __argo-cd command line__, you must choose PKCE way.
* [Keycloak and Argo CD with Client authentication](#keycloak-and-argocd-with-client-authentication)
* [Keycloak and Argo CD with PKCE](#keycloak-and-argocd-with-pkce)
* [Keycloak and ArgoCD with Client authentication](#keycloak-and-argocd-with-client-authentication)
* [Keycloak and ArgoCD with PKCE](#keycloak-and-argocd-with-pkce)
## Keycloak and Argo CD with Client authentication
## Keycloak and ArgoCD with Client authentication
These instructions will take you through the entire process of getting your Argo CD application to authenticate with Keycloak.
These instructions will take you through the entire process of getting your ArgoCD application authenticating with Keycloak.
Start by creating a client within Keycloak and configure Argo CD to use Keycloak for authentication, using groups set in Keycloak
You will create a client within Keycloak and configure ArgoCD to use Keycloak for authentication, using groups set in Keycloak
to determine privileges in Argo.
### Creating a new client in Keycloak
First, setup a new client.
First we need to setup a new client.
Start by logging into your keycloak server, select the realm you want to use (`master` by default)
and then go to __Clients__ and click the __Create client__ button at the top.
@@ -37,11 +37,11 @@ but it's not recommended in production).
Make sure to click __Save__.
There should be a tab called __Credentials__. You can copy the Client Secret that we'll use in our Argo CD configuration.
There should be a tab called __Credentials__. You can copy the Client Secret that we'll use in our ArgoCD configuration.
![Keycloak client secret](../../assets/keycloak-client-secret.png "Keycloak client secret")
### Configuring Argo CD OIDC
### Configuring ArgoCD OIDC
Let's start by storing the client secret you generated earlier in the argocd secret _argocd-secret_.
@@ -68,7 +68,7 @@ data:
clientID: argocd
clientSecret: $oidc.keycloak.clientSecret
refreshTokenThreshold: 2m
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
requestedScopes: ["openid", "profile", "email", "groups"]
```
Make sure that:
@@ -80,18 +80,18 @@ Make sure that:
- __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes
- __refreshTokenThreshold__ is less than the client token lifetime. If this setting is not less than the token lifetime, a new token will be obtained for every request. Keycloak sets the client token lifetime to 5 minutes by default.
## Keycloak and Argo CD with PKCE
## Keycloak and ArgoCD with PKCE
These instructions will take you through the entire process of getting your Argo CD application authenticating with Keycloak.
These instructions will take you through the entire process of getting your ArgoCD application authenticating with Keycloak.
You will create a client within Keycloak and configure Argo CD to use Keycloak for authentication, using groups set in Keycloak
You will create a client within Keycloak and configure ArgoCD to use Keycloak for authentication, using groups set in Keycloak
to determine privileges in Argo.
You will also be able to authenticate using argo-cd command line.
### Creating a new client in Keycloak
First, setup a new client.
First we need to setup a new client.
Start by logging into your keycloak server, select the realm you want to use (`master` by default)
and then go to __Clients__ and click the __Create client__ button at the top.
@@ -119,7 +119,7 @@ Now go to a tab called __Advanced__, look for parameter named __Proof Key for Co
![Keycloak configure client Step 2](../../assets/keycloak-configure-client-pkce_2.png "Keycloak configure client Step 2")
Make sure to click __Save__.
### Configuring Argo CD OIDC
### Configuring ArgoCD OIDC
Now we can configure the config map and add the oidc configuration to enable our keycloak authentication.
You can use `$ kubectl edit configmap argocd-cm`.
@@ -138,7 +138,7 @@ data:
clientID: argocd
enablePKCEAuthentication: true
refreshTokenThreshold: 2m
requestedScopes: ["openid", "profile", "email", "groups", "offline_access"]
requestedScopes: ["openid", "profile", "email", "groups"]
```
Make sure that:
@@ -146,13 +146,13 @@ Make sure that:
- __issuer__ ends with the correct realm (in this example _master_)
- __issuer__ on Keycloak releases older than version 17 the URL must include /auth (in this example /auth/realms/master)
- __clientID__ is set to the Client ID you configured in Keycloak
- __enablePKCEAuthentication__ must be set to true to enable correct Argo CD behaviour with PKCE
- __enablePKCEAuthentication__ must be set to true to enable correct ArgoCD behaviour with PKCE
- __requestedScopes__ contains the _groups_ claim if you didn't add it to the Default scopes
- __refreshTokenThreshold__ is less than the client token lifetime. If this setting is not less than the token lifetime, a new token will be obtained for every request. Keycloak sets the client token lifetime to 5 minutes by default.
## Configuring the groups claim
In order for Argo CD to provide the groups the user is in we need to configure a groups claim that can be included in the authentication token.
In order for ArgoCD to provide the groups the user is in we need to configure a groups claim that can be included in the authentication token.
To do this we'll start by creating a new __Client Scope__ called _groups_.
@@ -174,7 +174,7 @@ Go back to the client we've created earlier and go to the Tab "Client Scopes".
Click on "Add client scope", choose the _groups_ scope and add it either to the __Default__ or to the __Optional__ Client Scope.
If you put it in the Optional
category you will need to make sure that Argo CD requests the scope in its OIDC configuration.
category you will need to make sure that ArgoCD requests the scope in its OIDC configuration.
Since we will always want group information, I recommend
using the Default category.
@@ -184,7 +184,7 @@ Create a group called _ArgoCDAdmins_ and have your current user join the group.
![Keycloak user group](../../assets/keycloak-user-group.png "Keycloak user group")
## Configuring Argo CD Policy
## Configuring ArgoCD Policy
Now that we have an authentication that provides groups we want to apply a policy to these groups.
We can modify the _argocd-rbac-cm_ ConfigMap using `$ kubectl edit configmap argocd-rbac-cm`.
@@ -205,7 +205,7 @@ In this example we give the role _role:admin_ to all users in the group _ArgoCDA
You can now login using our new Keycloak OIDC authentication:
![Keycloak Argo CD login](../../assets/keycloak-login.png "Keycloak Argo CD login")
![Keycloak ArgoCD login](../../assets/keycloak-login.png "Keycloak ArgoCD login")
If you have used PKCE method, you can also authenticate using command line:
```bash
@@ -219,7 +219,7 @@ Once done, you should see
![Authentication successful!](../../assets/keycloak-authentication-successful.png "Authentication successful!")
## Troubleshoot
If Argo CD auth returns 401 or when the login attempt leads to the loop, then restart the argocd-server pod.
If ArgoCD auth returns 401 or when the login attempt leads to the loop, then restart the argocd-server pod.
```
kubectl rollout restart deployment argocd-server -n argocd
```
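To make the RBAC step in the Keycloak guide above concrete, a minimal sketch of the `argocd-rbac-cm` entry that grants `role:admin` to the _ArgoCDAdmins_ group; the group name is taken from the guide, and the policy line uses Argo CD's standard group-to-role grammar.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-rbac-cm
  namespace: argocd
data:
  policy.csv: |
    # Members of the Keycloak group ArgoCDAdmins get the built-in admin role
    g, ArgoCDAdmins, role:admin
```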

View File

@@ -4,7 +4,7 @@ mkdocs-github-admonitions-plugin==0.1.1
# Thus pointing to the older version of mkdocs-material.
mkdocs-material==7.1.8
markdown_include==0.8.1
pygments==2.19.2
pygments==2.20.0
jinja2==3.1.6
markdown==3.10
pymdown-extensions==10.17.1

View File

@@ -6,7 +6,7 @@
|--------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| argocd.argoproj.io/application-set-refresh | ApplicationSet | `"true"` | Added when an ApplicationSet is requested to be refreshed by a webhook. The ApplicationSet controller will remove this annotation at the end of reconciliation. |
| argocd.argoproj.io/compare-options | any | [see compare options docs](compare-options.md) | Configures how an app's current state is compared to its desired state. |
| argocd.argoproj.io/hook | any | [see hooks docs](sync-waves.md) | Used to configure [resource hooks](sync-waves.md). |
| argocd.argoproj.io/hook | any | [see resource hooks docs](resource_hooks.md) | Used to configure [resource hooks](resource_hooks.md). |
| argocd.argoproj.io/hook-delete-policy | any | [see sync waves docs](sync-waves.md#hook-lifecycle-and-cleanup) | Used to set a [resource hook's deletion policy](sync-waves.md#hook-lifecycle-and-cleanup). |
| argocd.argoproj.io/manifest-generate-paths | Application | [see scaling docs](../operator-manual/high_availability.md#manifest-paths-annotation) | Used to avoid unnecessary Application refreshes, especially in mono-repos. |
| argocd.argoproj.io/managed-by-url | Application | A valid http(s) URL | Specifies the URL of the Argo CD instance managing the application. Used to correctly link to applications managed by a different Argo CD instance. See [managed-by-url docs](../operator-manual/managed-by-url.md) for details. |

View File

@@ -94,26 +94,6 @@ spec:
> [!NOTE]
> Disabling self-heal does not guarantee that live cluster changes in multi-source applications will persist. Although one of the resource's sources remains unchanged, changes in another can trigger `autosync`. To handle such cases, consider disabling `autosync`.
## Automatic Retry with a limit
Argo CD can automatically retry a failed sync operation using exponential backoff. To enable, configure the `retry` field in the sync policy:
```yaml
spec:
syncPolicy:
retry:
limit: 5 # number of retries (-1 for unlimited retries)
backoff:
duration: 5s # base duration between retries
factor: 2 # exponential backoff factor
maxDuration: 3m # maximum duration between retries
```
- `limit`: number of retry attempts. Set to `-1` for unlimited retries.
- `backoff.duration`: base wait time before the first retry.
- `backoff.factor`: multiplier applied after each failed attempt.
- `backoff.maxDuration`: maximum wait time between retries, regardless of the number of attempts.
## Automatic Retry Refresh on new revisions
This feature allows users to configure their applications to refresh on new revisions when the current sync is retrying. To enable automatic refresh during sync retries, run:

View File

@@ -22,10 +22,6 @@ argocd appset [flags]
# Delete an ApplicationSet
argocd appset delete APPSETNAME (APPSETNAME...)
# Namespace precedence for --appset-namespace (-N):
# - get/delete: if the argument is namespace/name, that namespace wins; -N is ignored.
# - create/generate: metadata.namespace in the YAML wins when set; -N applies only when the manifest omits namespace.
```
### Options

View File

@@ -14,9 +14,6 @@ argocd appset create [flags]
# Create ApplicationSets
argocd appset create <filename or URL> (<filename or URL>...)
# Create ApplicationSet in a specific namespace using
argocd appset create --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
# Dry-run AppSet creation to see what applications would be managed
argocd appset create --dry-run <filename or URL> -o json | jq -r '.status.resources[].name'
```
@@ -24,12 +21,11 @@ argocd appset create [flags]
### Options
```
-N, --appset-namespace string Namespace where the ApplicationSet will be created in (ignored when provided YAML file has namespace set in metadata)
--dry-run Allows to evaluate the ApplicationSet template on the server to get a preview of the applications that would be created
-h, --help help for create
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--upsert Allows to override ApplicationSet with the same name even if supplied ApplicationSet spec is different from existing spec
--wait Wait until the ApplicationSet's resources are up to date. Will block indefinitely if the ApplicationSet has errors
--dry-run Allows to evaluate the ApplicationSet template on the server to get a preview of the applications that would be created
-h, --help help for create
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--upsert Allows to override ApplicationSet with the same name even if supplied ApplicationSet spec is different from existing spec
--wait Wait until the ApplicationSet's resources are up to date. Will block indefinitely if the ApplicationSet has errors
```
### Options inherited from parent commands

View File

@@ -13,21 +13,14 @@ argocd appset delete [flags]
```
# Delete an applicationset
argocd appset delete APPSETNAME (APPSETNAME...)
# Delete ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset delete APPSET_NAMESPACE/APPSETNAME
# Delete ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset delete --appset-namespace=APPSET_NAMESPACE APPSETNAME
```
### Options
```
-N, --appset-namespace string Namespace where the ApplicationSet will be deleted from (ignored when qualified name is provided)
-h, --help help for delete
--wait Wait until deletion of the applicationset(s) completes
-y, --yes Turn off prompting to confirm cascaded deletion of Application resources
-h, --help help for delete
--wait Wait until deletion of the applicationset(s) completes
-y, --yes Turn off prompting to confirm cascaded deletion of Application resources
```
### Options inherited from parent commands

View File

@@ -13,17 +13,13 @@ argocd appset generate [flags]
```
# Generate apps of ApplicationSet rendered templates
argocd appset generate <filename or URL> (<filename or URL>...)
# Generate apps of ApplicationSet rendered templates in a specific namespace
argocd appset generate --appset-namespace=APPSET_NAMESPACE <filename or URL> (<filename or URL>...)
```
### Options
```
-N, --appset-namespace string Namespace used for generating Applications (ignored when provided YAML file has namespace set in metadata)
-h, --help help for generate
-o, --output string Output format. One of: json|yaml|wide (default "wide")
-h, --help help for generate
-o, --output string Output format. One of: json|yaml|wide (default "wide")
```
### Options inherited from parent commands

View File

@@ -13,21 +13,14 @@ argocd appset get APPSETNAME [flags]
```
# Get ApplicationSets
argocd appset get APPSETNAME
# Get ApplicationSet in a specific namespace using qualified name (namespace/name)
argocd appset get APPSET_NAMESPACE/APPSETNAME
# Get ApplicationSet in a specific namespace using --appset-namespace flag
argocd appset get --appset-namespace=APPSET_NAMESPACE APPSETNAME
```
### Options
```
-N, --appset-namespace string Only get ApplicationSet from a namespace (ignored when qualified name is provided)
-h, --help help for get
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--show-params Show ApplicationSet parameters and overrides
-h, --help help for get
-o, --output string Output format. One of: json|yaml|wide (default "wide")
--show-params Show ApplicationSet parameters and overrides
```
### Options inherited from parent commands

View File

@@ -500,7 +500,7 @@ source:
## Helm Hooks
Helm hooks are similar to [Argo CD hooks](sync-waves.md). In Helm, a hook
Helm hooks are similar to [Argo CD hooks](resource_hooks.md). In Helm, a hook
is any normal Kubernetes resource annotated with the `helm.sh/hook` annotation.
Argo CD supports many (most?) Helm hooks by mapping the Helm annotations onto Argo CD's own hook annotations:
@@ -541,7 +541,7 @@ Unsupported hooks are ignored. In Argo CD, hooks are created by using `kubectl a
* Annotate `pre-install` and `post-install` with `hook-weight: "-1"`. This will make sure they run to success before any upgrade hooks.
* Annotate `pre-upgrade` and `post-upgrade` with `hook-delete-policy: before-hook-creation` to make sure it runs on every sync.
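As a minimal sketch of the advice above, assuming a migration `Job` shipped in the chart (names and image are placeholders):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: db-migrate                                     # placeholder hook name
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-weight: "-1"                          # ordered before any upgrade hooks
    helm.sh/hook-delete-policy: before-hook-creation   # recreated (and re-run) on every sync
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: migrate
          image: example/db-migrate:1.0                # placeholder image
```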
Read more about [Argo hooks](sync-waves.md) and [Helm hooks](https://helm.sh/docs/topics/charts_hooks/).
Read more about [Argo hooks](resource_hooks.md) and [Helm hooks](https://helm.sh/docs/topics/charts_hooks/).
## Random Data

View File

@@ -7,7 +7,7 @@ A *selective sync* is one where only some resources are sync'd. You can choose w
When doing so, bear in mind that:
* Your sync is not recorded in the history, and so rollback is not possible.
* [Hooks](sync-waves.md) are not run.
* [Hooks](resource_hooks.md) are not run.
## Selective Sync Option

41
go.mod
View File

@@ -35,7 +35,7 @@ require (
github.com/gfleury/go-bitbucket-v1 v0.0.0-20240917142304-df385efaac68
// DO NOT BUMP UNTIL go-git/go-git#1551 is fixed
github.com/go-git/go-git/v5 v5.14.0
github.com/go-jose/go-jose/v4 v4.1.4
github.com/go-jose/go-jose/v4 v4.1.3
github.com/go-logr/logr v1.4.3
github.com/go-openapi/loads v0.23.3
github.com/go-openapi/runtime v0.29.3
@@ -88,7 +88,7 @@ require (
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
github.com/valyala/fasttemplate v1.2.2
github.com/yuin/gopher-lua v1.1.2
github.com/yuin/gopher-lua v1.1.1
gitlab.com/gitlab-org/api/client-go v1.46.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0
@@ -96,7 +96,6 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0
go.opentelemetry.io/otel/sdk v1.42.0
go.opentelemetry.io/otel/trace v1.42.0
go.yaml.in/yaml/v3 v3.0.4
golang.org/x/crypto v0.49.0
golang.org/x/net v0.52.0
golang.org/x/oauth2 v0.36.0
@@ -107,6 +106,7 @@ require (
google.golang.org/grpc v1.79.3
google.golang.org/protobuf v1.36.11
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.34.0
k8s.io/apiextensions-apiserver v0.34.0
k8s.io/apimachinery v0.34.0
@@ -147,20 +147,20 @@ require (
github.com/PagerDuty/go-pagerduty v1.8.0 // indirect
github.com/ProtonMail/go-crypto v1.1.6 // indirect
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20240116134246-a8cbe886bab0 // indirect
github.com/aws/aws-sdk-go-v2 v1.41.5
github.com/aws/aws-sdk-go-v2/config v1.32.13
github.com/aws/aws-sdk-go-v2/credentials v1.19.13
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect
github.com/aws/aws-sdk-go-v2 v1.41.4
github.com/aws/aws-sdk-go-v2/config v1.32.11
github.com/aws/aws-sdk-go-v2/credentials v1.19.12
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.14 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.10
github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.9
github.com/aws/smithy-go v1.24.2
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -280,6 +280,7 @@ require (
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.33.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/text v0.35.0 // indirect
@@ -313,15 +314,12 @@ require (
)
require (
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.12
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.11
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.8
github.com/oklog/ulid/v2 v2.1.1 // indirect
)
require (
github.com/google/go-github/v84 v84.0.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
require github.com/google/go-github/v84 v84.0.0 // indirect
replace (
github.com/golang/protobuf => github.com/golang/protobuf v1.5.4
@@ -331,6 +329,9 @@ replace (
// Avoid CVE-2022-3064
gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0
// Avoid CVE-2022-28948
gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
k8s.io/api => k8s.io/api v0.34.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.0
k8s.io/apimachinery => k8s.io/apimachinery v0.34.0

62
go.sum
View File

@@ -124,38 +124,38 @@ github.com/argoproj/pkg/v2 v2.0.1/go.mod h1:sdifF6sUTx9ifs38ZaiNMRJuMpSCBB9GulHf
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.44.39/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY=
github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
github.com/aws/aws-sdk-go-v2/config v1.32.13 h1:5KgbxMaS2coSWRrx9TX/QtWbqzgQkOdEa3sZPhBhCSg=
github.com/aws/aws-sdk-go-v2/config v1.32.13/go.mod h1:8zz7wedqtCbw5e9Mi2doEwDyEgHcEE9YOJp6a8jdSMY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.13 h1:mA59E3fokBvyEGHKFdnpNNrvaR351cqiHgRg+JzOSRI=
github.com/aws/aws-sdk-go-v2/credentials v1.19.13/go.mod h1:yoTXOQKea18nrM69wGF9jBdG4WocSZA1h38A+t/MAsk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY=
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.12 h1:yv3mfWt/eiDTTry6fkN5hh8wHJfU5ygnw+DJp10C0/c=
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.12/go.mod h1:voO3LP/dZ4CTERiNWCz3DFLbK/8hbfeC1OJkLW+sang=
github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k=
github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
github.com/aws/aws-sdk-go-v2/config v1.32.11 h1:ftxI5sgz8jZkckuUHXfC/wMUc8u3fG1vQS0plr2F2Zs=
github.com/aws/aws-sdk-go-v2/config v1.32.11/go.mod h1:twF11+6ps9aNRKEDimksp923o44w/Thk9+8YIlzWMmo=
github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20/go.mod h1:YJ898MhD067hSHA6xYCx5ts/jEd8BSOLtQDL3iZsvbc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 h1:clHU5fm//kWS1C2HgtgWxfQbFbx4b6rx+5jzhgX9HrI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY=
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.11 h1:R3S5odXTsflG7xUp9S2AsewSXtQi1LBd+stJ5OpCIog=
github.com/aws/aws-sdk-go-v2/service/codecommit v1.33.11/go.mod h1:OekzWXyZi3ptl+YoKmm+G5ODIa4BDEArvZv8gHrQb5s=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk=
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.8 h1:mGgiunl7ZwOwhpJwJNF4JfsZFYJp08wjyS3NqFQe3ws=
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.8/go.mod h1:KdM2EhXeHfeBQz5keOvv/FM7kbesjCWm7HEEyJe3frs=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.8/go.mod h1:LXypKvk85AROkKhOG6/YEcHFPoX+prKTowKnVdcaIxE=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1 h1:ZtgZeMPJH8+/vNs9vJFFLI0QEzYbcN0p7x1/FFwyROc=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.1/go.mod h1:Bar4MrRxeqdn6XIh8JGfiXuFRmyrrsZNTJotxEJmWW0=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.14 h1:GcLE9ba5ehAQma6wlopUesYg/hbcOhFNWTjELkiWkh4=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.14/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18 h1:mP49nTpfKtpXLt5SLn8Uv8z6W+03jYVoOSAl/c02nog=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18/go.mod h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 h1:kiIDLZ005EcKomYYITtfsjn7dtOwHDOFy7IbPXKek2o=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.13/go.mod h1:2h/xGEowcW/g38g06g3KpRWDlT+OTfxxI0o1KqayAB8=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 h1:jzKAXIlhZhJbnYwHbvUQZEB8KfgAEuG0dc08Bkda7NU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk=
github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=
github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
@@ -313,8 +313,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA=
github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -947,8 +947,8 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v1.1.2 h1:yF/FjE3hD65tBbt0VXLE13HWS9h34fdzJmrWRXwobGA=
github.com/yuin/gopher-lua v1.1.2/go.mod h1:7aRmXIWl37SqRf0koeyylBEzJ+aPt8A+mmkQ4f1ntR8=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
gitlab.com/gitlab-org/api/client-go v1.46.0 h1:YxBWFZIFYKcGESCb9fpkwzouo+apyB9pr/XTWzNoL24=
@@ -1445,8 +1445,6 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -136,7 +136,6 @@ nav:
- operator-manual/server-commands/additional-configuration-method.md
- Upgrading:
- operator-manual/upgrading/overview.md
- operator-manual/upgrading/3.4-3.5.md
- operator-manual/upgrading/3.3-3.4.md
- operator-manual/upgrading/3.2-3.3.md
- operator-manual/upgrading/3.1-3.2.md

View File

@@ -1333,7 +1333,7 @@ func helmTemplate(appPath string, repoRoot string, env *v1alpha1.Env, q *apiclie
return nil, "", fmt.Errorf("error getting helm repos: %w", err)
}
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
h, err := helm.NewHelmApp(appPath, helmRepos, isLocal, version, proxy, q.Repo.NoProxy, passCredentials)
if err != nil {
return nil, "", fmt.Errorf("error initializing helm app object: %w", err)
}
@@ -2443,7 +2443,7 @@ func (s *Service) populateHelmAppDetails(res *apiclient.RepoAppDetailsResponse,
if err != nil {
return err
}
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials, q.Repo.Insecure)
h, err := helm.NewHelmApp(appPath, helmRepos, false, version, q.Repo.Proxy, q.Repo.NoProxy, passCredentials)
if err != nil {
return err
}

View File

@@ -1,60 +0,0 @@
local hs = {}
if obj.metadata.generation ~= nil and obj.status ~= nil and obj.status.observedGeneration ~= nil then
if obj.metadata.generation ~= obj.status.observedGeneration then
hs.status = "Progressing"
hs.message = "Waiting for NodeClaim spec to be reconciled"
return hs
end
end
if obj.status ~= nil and obj.status.conditions ~= nil then
-- Disrupting takes priority: node is being terminated/consolidated/expired
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Disrupting" and condition.status == "True" then
hs.status = "Suspended"
hs.message = condition.message
return hs
end
end
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" then
if condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
elseif condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
end
end
-- Ready condition is Unknown or absent: report the furthest phase reached
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Initialized" and condition.status == "True" then
hs.status = "Progressing"
hs.message = "Node initialized, waiting for Ready"
return hs
end
end
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Registered" and condition.status == "True" then
hs.status = "Progressing"
hs.message = "Node registered, waiting for initialization"
return hs
end
end
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Launched" and condition.status == "True" then
hs.status = "Progressing"
hs.message = "Node launched, waiting for registration"
return hs
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for NodeClaim to be launched"
return hs

View File

@@ -1,33 +0,0 @@
tests:
- healthStatus:
status: Progressing
message: "Waiting for NodeClaim to be launched"
inputPath: testdata/progressing_noStatus.yaml
- healthStatus:
status: Progressing
message: "Node launched, waiting for registration"
inputPath: testdata/progressing_launched.yaml
- healthStatus:
status: Progressing
message: "Node registered, waiting for initialization"
inputPath: testdata/progressing_registered.yaml
- healthStatus:
status: Healthy
message: ""
inputPath: testdata/healthy.yaml
- healthStatus:
status: Degraded
message: "Instance i-0abc123def456789 terminated unexpectedly"
inputPath: testdata/degraded.yaml
- healthStatus:
status: Suspended
message: "TTL expired"
inputPath: testdata/suspended_disrupting.yaml
- healthStatus:
status: Progressing
message: "Node initialized, waiting for Ready"
inputPath: testdata/progressing_initialized.yaml
- healthStatus:
status: Progressing
message: "Waiting for NodeClaim to be launched"
inputPath: testdata/progressing_readyUnknown.yaml

View File

@@ -1,32 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
conditions:
- message: ""
reason: Launched
status: "True"
type: Launched
- message: ""
reason: Registered
status: "True"
type: Registered
- message: ""
reason: Initialized
status: "True"
type: Initialized
- message: "Instance i-0abc123def456789 terminated unexpectedly"
reason: NotReady
status: "False"
type: Ready

View File

@@ -1,34 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
nodeName: ip-10-0-1-100.ec2.internal
providerID: aws:///us-east-1a/i-0abc123def456789
conditions:
- message: ""
reason: Launched
status: "True"
type: Launched
- message: ""
reason: Registered
status: "True"
type: Registered
- message: ""
reason: Initialized
status: "True"
type: Initialized
- message: ""
reason: Ready
status: "True"
type: Ready

View File

@@ -1,36 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
generation: 1
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
observedGeneration: 1
nodeName: ip-10-0-1-100.ec2.internal
providerID: aws:///us-east-1a/i-0abc123def456789
conditions:
- message: ""
reason: Launched
status: "True"
type: Launched
- message: ""
reason: Registered
status: "True"
type: Registered
- message: ""
reason: Initialized
status: "True"
type: Initialized
- message: ""
reason: Ready
status: "Unknown"
type: Ready

View File

@@ -1,21 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
providerID: aws:///us-east-1a/i-0abc123def456789
conditions:
- message: ""
reason: Launched
status: "True"
type: Launched

View File

@@ -1,14 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5

View File

@@ -1,22 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
generation: 1
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
observedGeneration: 1
conditions:
- message: ""
reason: Unknown
status: "Unknown"
type: Ready

View File

@@ -1,26 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
nodeName: ip-10-0-1-100.ec2.internal
providerID: aws:///us-east-1a/i-0abc123def456789
conditions:
- message: ""
reason: Launched
status: "True"
type: Launched
- message: ""
reason: Registered
status: "True"
type: Registered

View File

@@ -1,38 +0,0 @@
apiVersion: karpenter.sh/v1
kind: NodeClaim
metadata:
name: default-xxxx
spec:
nodeClassRef:
group: karpenter.k8s.aws
kind: EC2NodeClass
name: default
requirements:
- key: karpenter.k8s.aws/instance-family
operator: In
values:
- m5
status:
nodeName: ip-10-0-1-100.ec2.internal
providerID: aws:///us-east-1a/i-0abc123def456789
conditions:
- message: ""
reason: Launched
status: "True"
type: Launched
- message: ""
reason: Registered
status: "True"
type: Registered
- message: ""
reason: Initialized
status: "True"
type: Initialized
- message: ""
reason: Ready
status: "True"
type: Ready
- message: "TTL expired"
reason: Expired
status: "True"
type: Disrupting

View File

@@ -508,7 +508,7 @@ func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationMan
return fmt.Errorf("error getting app instance label key from settings: %w", err)
}
config, err := s.getApplicationClusterConfig(ctx, a, proj)
config, err := s.getApplicationClusterConfig(ctx, a)
if err != nil {
return fmt.Errorf("error getting application cluster config: %w", err)
}
@@ -670,7 +670,7 @@ func (s *Server) GetManifestsWithFiles(stream application.ApplicationService_Get
return fmt.Errorf("error getting trackingMethod from settings: %w", err)
}
config, err := s.getApplicationClusterConfig(ctx, a, proj)
config, err := s.getApplicationClusterConfig(ctx, a)
if err != nil {
return fmt.Errorf("error getting application cluster config: %w", err)
}
@@ -879,7 +879,7 @@ func (s *Server) Get(ctx context.Context, q *application.ApplicationQuery) (*v1a
// ListResourceEvents returns a list of event resources
func (s *Server) ListResourceEvents(ctx context.Context, q *application.ApplicationResourceEventsQuery) (*corev1.EventList, error) {
a, p, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
if err != nil {
return nil, err
}
@@ -918,7 +918,7 @@ func (s *Server) ListResourceEvents(ctx context.Context, q *application.Applicat
namespace = q.GetResourceNamespace()
var config *rest.Config
config, err = s.getApplicationClusterConfig(ctx, a, p)
config, err = s.getApplicationClusterConfig(ctx, a)
if err != nil {
return nil, fmt.Errorf("error getting application cluster config: %w", err)
}
@@ -1377,7 +1377,7 @@ func (s *Server) validateAndNormalizeApp(ctx context.Context, app *v1alpha1.Appl
return nil
}
func (s *Server) getApplicationClusterConfig(ctx context.Context, a *v1alpha1.Application, p *v1alpha1.AppProject) (*rest.Config, error) {
func (s *Server) getApplicationClusterConfig(ctx context.Context, a *v1alpha1.Application) (*rest.Config, error) {
cluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, s.db)
if err != nil {
return nil, fmt.Errorf("error validating destination: %w", err)
@@ -1387,24 +1387,6 @@ func (s *Server) getApplicationClusterConfig(ctx context.Context, a *v1alpha1.Ap
return nil, fmt.Errorf("error getting cluster REST config: %w", err)
}
impersonationEnabled, err := s.settingsMgr.IsImpersonationEnabled()
if err != nil {
return nil, fmt.Errorf("error getting impersonation setting: %w", err)
}
if !impersonationEnabled {
return config, nil
}
user, err := settings.DeriveServiceAccountToImpersonate(p, a, cluster)
if err != nil {
return nil, fmt.Errorf("error deriving service account to impersonate: %w", err)
}
config.Impersonate = rest.ImpersonationConfig{
UserName: user,
}
return config, err
}
@@ -1455,7 +1437,7 @@ func (s *Server) getAppLiveResource(ctx context.Context, action string, q *appli
if fineGrainedInheritanceDisabled && (action == rbac.ActionDelete || action == rbac.ActionUpdate) {
action = fmt.Sprintf("%s/%s/%s/%s/%s", action, q.GetGroup(), q.GetKind(), q.GetNamespace(), q.GetResourceName())
}
a, p, err := s.getApplicationEnforceRBACInformer(ctx, action, q.GetProject(), q.GetAppNamespace(), q.GetName())
a, _, err := s.getApplicationEnforceRBACInformer(ctx, action, q.GetProject(), q.GetAppNamespace(), q.GetName())
if !fineGrainedInheritanceDisabled && err != nil && errors.Is(err, argocommon.PermissionDeniedAPIError) && (action == rbac.ActionDelete || action == rbac.ActionUpdate) {
action = fmt.Sprintf("%s/%s/%s/%s/%s", action, q.GetGroup(), q.GetKind(), q.GetNamespace(), q.GetResourceName())
a, _, err = s.getApplicationEnforceRBACInformer(ctx, action, q.GetProject(), q.GetAppNamespace(), q.GetName())
@@ -1473,11 +1455,10 @@ func (s *Server) getAppLiveResource(ctx context.Context, action string, q *appli
if found == nil || found.UID == "" {
return nil, nil, nil, status.Errorf(codes.InvalidArgument, "%s %s %s not found as part of application %s", q.GetKind(), q.GetGroup(), q.GetResourceName(), q.GetName())
}
config, err := s.getApplicationClusterConfig(ctx, a, p)
config, err := s.getApplicationClusterConfig(ctx, a)
if err != nil {
return nil, nil, nil, fmt.Errorf("error getting application cluster config: %w", err)
}
return found, config, a, nil
}
@@ -1590,7 +1571,6 @@ func (s *Server) DeleteResource(ctx context.Context, q *application.ApplicationR
propagationPolicy := metav1.DeletePropagationForeground
deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}
}
err = s.kubectl.DeleteResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, deleteOption)
if err != nil {
return nil, fmt.Errorf("error deleting resource: %w", err)
@@ -1846,7 +1826,7 @@ func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.
}
}
a, p, err := s.getApplicationEnforceRBACInformer(ws.Context(), rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
a, _, err := s.getApplicationEnforceRBACInformer(ws.Context(), rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
if err != nil {
return err
}
@@ -1860,7 +1840,7 @@ func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.
return fmt.Errorf("error getting app resource tree: %w", err)
}
config, err := s.getApplicationClusterConfig(ws.Context(), a, p)
config, err := s.getApplicationClusterConfig(ws.Context(), a)
if err != nil {
return fmt.Errorf("error getting application cluster config: %w", err)
}
@@ -2535,8 +2515,7 @@ func (s *Server) ListResourceActions(ctx context.Context, q *application.Applica
func (s *Server) getUnstructuredLiveResourceOrApp(ctx context.Context, rbacRequest string, q *application.ApplicationResourceRequest) (obj *unstructured.Unstructured, res *v1alpha1.ResourceNode, app *v1alpha1.Application, config *rest.Config, err error) {
if q.GetKind() == applicationType.ApplicationKind && q.GetGroup() == applicationType.Group && q.GetName() == q.GetResourceName() {
var p *v1alpha1.AppProject
app, p, err = s.getApplicationEnforceRBACInformer(ctx, rbacRequest, q.GetProject(), q.GetAppNamespace(), q.GetName())
app, _, err = s.getApplicationEnforceRBACInformer(ctx, rbacRequest, q.GetProject(), q.GetAppNamespace(), q.GetName())
if err != nil {
return nil, nil, nil, nil, err
}
@@ -2544,7 +2523,7 @@ func (s *Server) getUnstructuredLiveResourceOrApp(ctx context.Context, rbacReque
if err != nil {
return nil, nil, nil, nil, err
}
config, err = s.getApplicationClusterConfig(ctx, app, p)
config, err = s.getApplicationClusterConfig(ctx, app)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("error getting application cluster config: %w", err)
}

View File

@@ -4644,129 +4644,3 @@ func TestTerminateOperationWithConflicts(t *testing.T) {
require.NoError(t, err)
assert.GreaterOrEqual(t, updateCallCount, 2, "Update should be called at least twice (once with conflict, once with success)")
}
func TestGetApplicationClusterConfig(t *testing.T) {
t.Run("ImpersonationDisabled", func(t *testing.T) {
app := newTestApp()
appServer := newTestAppServer(t, app)
project := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "*"}},
},
}
config, err := appServer.getApplicationClusterConfig(t.Context(), app, project)
require.NoError(t, err)
assert.Empty(t, config.Impersonate.UserName)
})
t.Run("ImpersonationEnabledWithMatch", func(t *testing.T) {
f := func(enf *rbac.Enforcer) {
_ = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV)
enf.SetDefaultRole("role:admin")
}
projWithSA := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "proj-impersonate", Namespace: "default"},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "*"}},
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://cluster-api.example.com",
Namespace: test.FakeDestNamespace,
DefaultServiceAccount: "test-sa",
},
},
},
}
app := newTestApp(func(a *v1alpha1.Application) {
a.Spec.Project = "proj-impersonate"
})
appServer := newTestAppServerWithEnforcerConfigure(t, f,
map[string]string{"application.sync.impersonation.enabled": "true"},
app, projWithSA,
)
config, err := appServer.getApplicationClusterConfig(t.Context(), app, projWithSA)
require.NoError(t, err)
assert.Equal(t, "system:serviceaccount:"+test.FakeDestNamespace+":test-sa", config.Impersonate.UserName)
})
t.Run("ImpersonationEnabledWithNoMatch", func(t *testing.T) {
f := func(enf *rbac.Enforcer) {
_ = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV)
enf.SetDefaultRole("role:admin")
}
app := newTestApp()
appServer := newTestAppServerWithEnforcerConfigure(t, f,
map[string]string{"application.sync.impersonation.enabled": "true"},
app,
)
// "default" project has no DestinationServiceAccounts
project := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "*"}},
},
}
config, err := appServer.getApplicationClusterConfig(t.Context(), app, project)
assert.Nil(t, config)
assert.ErrorContains(t, err, "no matching service account found")
})
}
func TestGetUnstructuredLiveResourceOrAppWithImpersonation(t *testing.T) {
f := func(enf *rbac.Enforcer) {
_ = enf.SetBuiltinPolicy(assets.BuiltinPolicyCSV)
enf.SetDefaultRole("role:admin")
}
projWithSA := &v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "proj-impersonate", Namespace: "default"},
Spec: v1alpha1.AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []v1alpha1.ApplicationDestination{{Server: "*", Namespace: "*"}},
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{
Server: "https://cluster-api.example.com",
Namespace: test.FakeDestNamespace,
DefaultServiceAccount: "test-sa",
},
},
},
}
app := newTestApp(func(a *v1alpha1.Application) {
a.Spec.Project = "proj-impersonate"
})
appServer := newTestAppServerWithEnforcerConfigure(t, f,
map[string]string{"application.sync.impersonation.enabled": "true"},
app, projWithSA,
)
appName := app.Name
group := "argoproj.io"
kind := "Application"
project := "proj-impersonate"
_, _, _, config, err := appServer.getUnstructuredLiveResourceOrApp(t.Context(), rbac.ActionGet, &application.ApplicationResourceRequest{
Name: &appName,
ResourceName: &appName,
Group: &group,
Kind: &kind,
Project: &project,
})
require.NoError(t, err)
assert.Equal(t, "system:serviceaccount:"+test.FakeDestNamespace+":test-sa", config.Impersonate.UserName)
}

View File

@@ -14,7 +14,7 @@ import (
"github.com/felixge/httpsnoop"
log "github.com/sirupsen/logrus"
"go.yaml.in/yaml/v3"
"gopkg.in/yaml.v3"
"github.com/argoproj/argo-cd/v3/util/rbac"

View File

@@ -334,6 +334,8 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
appsetLister := appFactory.Argoproj().V1alpha1().ApplicationSets().Lister()
userStateStorage := util_session.NewUserStateStorage(opts.RedisClient)
ssoClientApp, err := oidc.NewClientApp(settings, opts.DexServerAddr, opts.DexTLSConfig, opts.BaseHRef, cacheutil.NewRedisCache(opts.RedisClient, settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
errorsutil.CheckError(err)
sessionMgr := util_session.NewSessionManager(settingsMgr, projLister, opts.DexServerAddr, opts.DexTLSConfig, userStateStorage)
enf := rbac.NewEnforcer(opts.KubeClientset, opts.Namespace, common.ArgoCDRBACConfigMapName, nil)
enf.EnableEnforce(!opts.DisableAuth)
@@ -381,6 +383,7 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts, appsetOpts Applicatio
a := &ArgoCDServer{
ArgoCDServerOpts: opts,
ApplicationSetOpts: appsetOpts,
ssoClientApp: ssoClientApp,
log: logger,
settings: settings,
sessionMgr: sessionMgr,
@@ -491,11 +494,11 @@ func (server *ArgoCDServer) logInClusterWarnings() error {
}
if len(inClusterSecrets) > 0 {
// Don't make this call unless we actually have in-cluster secrets, to save time.
inClusterEnabled, err := server.settingsMgr.IsInClusterEnabled()
dbSettings, err := server.settingsMgr.GetSettings()
if err != nil {
return fmt.Errorf("could not check if in-cluster is enabled: %w", err)
return fmt.Errorf("could not get DB settings: %w", err)
}
if !inClusterEnabled {
if !dbSettings.InClusterEnabled {
for _, clusterName := range inClusterSecrets {
log.Warnf("cluster %q uses in-cluster server address but it's disabled in Argo CD settings", clusterName)
}
@@ -583,10 +586,6 @@ func (server *ArgoCDServer) Run(ctx context.Context, listeners *Listeners) {
if server.RedisClient != nil {
cacheutil.CollectMetrics(server.RedisClient, metricsServ, server.userStateStorage.GetLockObject())
}
// OIDC config needs to be refreshed at each server restart
ssoClientApp, err := oidc.NewClientApp(server.settings, server.DexServerAddr, server.DexTLSConfig, server.BaseHRef, cacheutil.NewRedisCache(server.RedisClient, server.settings.UserInfoCacheExpiration(), cacheutil.RedisCompressionNone))
errorsutil.CheckError(err)
server.ssoClientApp = ssoClientApp
// Don't init storage until after CollectMetrics. CollectMetrics adds hooks to the Redis client, and Init
// reads those hooks. If this is called first, there may be a data race.

View File

@@ -488,100 +488,6 @@ func TestGracefulShutdown(t *testing.T) {
assert.True(t, shutdown)
}
func TestOIDCRefresh(t *testing.T) {
port, err := test.GetFreePort()
require.NoError(t, err)
mockRepoClient := &mocks.Clientset{RepoServerServiceClient: &mocks.RepoServerServiceClient{}}
cm := test.NewFakeConfigMap()
cm.Data["oidc.config"] = `
name: Test OIDC
issuer: $oidc.myoidc.issuer
clientID: $oidc.myoidc.clientId
clientSecret: $oidc.myoidc.clientSecret
`
secret := test.NewFakeSecret()
issuerURL := "http://oidc.127.0.0.1.nip.io"
updatedIssuerURL := "http://newoidc.127.0.0.1.nip.io"
secret.Data["oidc.myoidc.issuer"] = []byte(issuerURL)
secret.Data["oidc.myoidc.clientId"] = []byte("myClientId")
secret.Data["oidc.myoidc.clientSecret"] = []byte("myClientSecret")
kubeclientset := fake.NewSimpleClientset(cm, secret)
redis, redisCloser := test.NewInMemoryRedis()
defer redisCloser()
s := NewServer(
t.Context(),
ArgoCDServerOpts{
ListenPort: port,
Namespace: test.FakeArgoCDNamespace,
KubeClientset: kubeclientset,
AppClientset: apps.NewSimpleClientset(),
RepoClientset: mockRepoClient,
RedisClient: redis,
},
ApplicationSetOpts{},
)
projInformerCancel := test.StartInformer(s.projInformer)
defer projInformerCancel()
appInformerCancel := test.StartInformer(s.appInformer)
defer appInformerCancel()
appsetInformerCancel := test.StartInformer(s.appsetInformer)
defer appsetInformerCancel()
clusterInformerCancel := test.StartInformer(s.clusterInformer)
defer clusterInformerCancel()
shutdown := false
lns, err := s.Listen()
require.NoError(t, err)
runCtx := t.Context()
var wg gosync.WaitGroup
wg.Add(1)
go func(shutdown *bool) {
defer wg.Done()
s.Run(runCtx, lns)
*shutdown = true
}(&shutdown)
for !s.available.Load() {
time.Sleep(10 * time.Millisecond)
}
assert.True(t, s.available.Load())
assert.Equal(t, issuerURL, s.ssoClientApp.IssuerURL())
// Update oidc config
secret.Data["oidc.myoidc.issuer"] = []byte(updatedIssuerURL)
secret.ResourceVersion = "12345"
_, err = kubeclientset.CoreV1().Secrets(test.FakeArgoCDNamespace).Update(runCtx, secret, metav1.UpdateOptions{})
require.NoError(t, err)
// Wait for graceful shutdown
wg.Wait()
for s.available.Load() {
time.Sleep(10 * time.Millisecond)
}
assert.False(t, s.available.Load())
shutdown = false
wg.Add(1)
go func(shutdown *bool) {
defer wg.Done()
s.Run(runCtx, lns)
*shutdown = true
}(&shutdown)
for !s.available.Load() {
time.Sleep(10 * time.Millisecond)
}
assert.True(t, s.available.Load())
assert.Equal(t, updatedIssuerURL, s.ssoClientApp.IssuerURL())
s.stopCh <- syscall.SIGINT
wg.Wait()
}
func TestAuthenticate(t *testing.T) {
type testData struct {
test string

View File

@@ -14,7 +14,7 @@ FROM docker.io/library/registry:3.0@sha256:6c5666b861f3505b116bb9aa9b25175e71210
FROM docker.io/bitnamilegacy/kubectl:1.32@sha256:9524faf8e3cefb47fa28244a5d15f95ec21a73d963273798e593e61f80712333 AS kubectl
FROM docker.io/library/ubuntu:26.04@sha256:730382b4a53a3c4a1498b7a36f11a62117f133fe6e73b01bb91303ed2ad87cdd
FROM docker.io/library/ubuntu:26.04@sha256:91832dcd7bc5e44c098ecefc0a251a5c5d596dae494b33fb248e01b6840f8ce0
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -133,7 +133,7 @@ func TestSimpleGitDirectoryGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestSimpleGitDirectoryGeneratorGoTemplate(t *testing.T) {
@@ -240,7 +240,7 @@ func TestSimpleGitDirectoryGeneratorGoTemplate(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestSimpleGitDirectoryGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
@@ -335,7 +335,7 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
// verify the ApplicationSet error status conditions were set correctly
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
func TestSimpleGitDirectoryGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
@@ -438,7 +438,7 @@ func TestSimpleGitDirectoryGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
Expect(ApplicationsDoNotExist(expectedApps)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
func TestSimpleGitFilesGenerator(t *testing.T) {
@@ -544,7 +544,7 @@ func TestSimpleGitFilesGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestSimpleGitFilesGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
@@ -639,7 +639,7 @@ func TestSimpleGitFilesGeneratorGPGEnabledUnsignedCommits(t *testing.T) {
// verify the ApplicationSet error status conditions were set correctly
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
func TestSimpleGitFilesGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
@@ -738,7 +738,7 @@ func TestSimpleGitFilesGeneratorGPGEnabledWithoutKnownKeys(t *testing.T) {
Expect(ApplicationSetHasConditions(expectedConditionsParamsError)).
Expect(ApplicationsDoNotExist(expectedApps)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
func TestSimpleGitFilesGeneratorGoTemplate(t *testing.T) {
@@ -845,7 +845,7 @@ func TestSimpleGitFilesGeneratorGoTemplate(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestSimpleGitFilesPreserveResourcesOnDeletion(t *testing.T) {
@@ -894,7 +894,7 @@ func TestSimpleGitFilesPreserveResourcesOnDeletion(t *testing.T) {
// We use an extra-long duration here, as we might need to wait for image pull.
}).Then().ExpectWithDuration(Pod(t, func(p corev1.Pod) bool { return strings.Contains(p.Name, "guestbook-ui") }), 6*time.Minute).
When().
Delete(metav1.DeletePropagationForeground).
Delete().
And(func() {
t.Log("Waiting 15 seconds to give the cluster a chance to delete the pods.")
// Wait 15 seconds to give the cluster a chance to delete the pods, if it is going to do so.
@@ -952,7 +952,7 @@ func TestSimpleGitFilesPreserveResourcesOnDeletionGoTemplate(t *testing.T) {
// We use an extra-long duration here, as we might need to wait for image pull.
}).Then().ExpectWithDuration(Pod(t, func(p corev1.Pod) bool { return strings.Contains(p.Name, "guestbook-ui") }), 6*time.Minute).
When().
Delete(metav1.DeletePropagationForeground).
Delete().
And(func() {
t.Log("Waiting 15 seconds to give the cluster a chance to delete the pods.")
// Wait 15 seconds to give the cluster a chance to delete the pods, if it is going to do so.
@@ -1034,7 +1034,7 @@ func TestGitGeneratorPrivateRepo(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestGitGeneratorPrivateRepoGoTemplate(t *testing.T) {
@@ -1108,7 +1108,7 @@ func TestGitGeneratorPrivateRepoGoTemplate(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestSimpleGitGeneratorPrivateRepoWithNoRepo(t *testing.T) {
@@ -1180,7 +1180,7 @@ func TestSimpleGitGeneratorPrivateRepoWithNoRepo(t *testing.T) {
}).Then().Expect(ApplicationsDoNotExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestSimpleGitGeneratorPrivateRepoWithMatchingProject(t *testing.T) {
@@ -1251,7 +1251,7 @@ func TestSimpleGitGeneratorPrivateRepoWithMatchingProject(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(expectedApps))
}
func TestSimpleGitGeneratorPrivateRepoWithMismatchingProject(t *testing.T) {
@@ -1324,7 +1324,7 @@ func TestSimpleGitGeneratorPrivateRepoWithMismatchingProject(t *testing.T) {
}).Then().Expect(ApplicationsDoNotExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestGitGeneratorPrivateRepoWithTemplatedProject(t *testing.T) {
@@ -1400,7 +1400,7 @@ func TestGitGeneratorPrivateRepoWithTemplatedProject(t *testing.T) {
}).Then().Expect(ApplicationsExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestGitGeneratorPrivateRepoWithTemplatedProjectAndProjectScopedRepo(t *testing.T) {
@@ -1484,5 +1484,5 @@ func TestGitGeneratorPrivateRepoWithTemplatedProjectAndProjectScopedRepo(t *test
}).Then().Expect(ApplicationsDoNotExist(expectedApps)).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}

View File

@@ -175,7 +175,7 @@ func TestApplicationSetProgressiveSyncStep(t *testing.T) {
ExpectWithDuration(CheckApplicationInRightSteps("3", []string{"app3-prod"}), time.Second*5).
// cleanup
When().
Delete(metav1.DeletePropagationForeground).
Delete().
Then().
ExpectWithDuration(ApplicationsDoNotExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp}), time.Minute)
}
@@ -184,9 +184,9 @@ func TestProgressiveSyncHealthGating(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}
expectedDevApp := generateExpectedApp("prog-", "progressive-sync/", "dev", "dev", "")
expectedStageApp := generateExpectedApp("prog-", "progressive-sync/", "staging", "staging", "")
expectedProdApp := generateExpectedApp("prog-", "progressive-sync/", "prod", "prod", "")
expectedDevApp := generateExpectedApp("prog-", "progressive-sync/", "dev", "dev")
expectedStageApp := generateExpectedApp("prog-", "progressive-sync/", "staging", "staging")
expectedProdApp := generateExpectedApp("prog-", "progressive-sync/", "prod", "prod")
expectedStatusWave1 := map[string]v1alpha1.ApplicationSetApplicationStatus{
"prog-dev": {
@@ -343,7 +343,7 @@ func TestProgressiveSyncHealthGating(t *testing.T) {
}).
// Cleanup
When().
Delete(metav1.DeletePropagationForeground).
Delete().
Then().
ExpectWithDuration(ApplicationsDoNotExist([]v1alpha1.Application{expectedDevApp, expectedStageApp, expectedProdApp}), TransitionTimeout)
}
@@ -381,9 +381,9 @@ func TestNoApplicationStatusWhenNoSteps(t *testing.T) {
}
expectedApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev", ""),
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging", ""),
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod", ""),
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev"),
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging"),
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod"),
}
Given(t).
When().
@@ -393,7 +393,7 @@ func TestNoApplicationStatusWhenNoSteps(t *testing.T) {
Expect(ApplicationSetDoesNotHaveApplicationStatus()).
// Cleanup
When().
Delete(metav1.DeletePropagationForeground).
Delete().
Then().
ExpectWithDuration(ApplicationsDoNotExist(expectedApps), TransitionTimeout)
}
@@ -403,9 +403,9 @@ func TestNoApplicationStatusWhenNoApplications(t *testing.T) {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}
expectedApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev", ""),
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging", ""),
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod", ""),
generateExpectedApp("prog-", "progressive-sync/", "dev", "dev"),
generateExpectedApp("prog-", "progressive-sync/", "staging", "staging"),
generateExpectedApp("prog-", "progressive-sync/", "prod", "prod"),
}
Given(t).
When().
@@ -415,107 +415,37 @@ func TestNoApplicationStatusWhenNoApplications(t *testing.T) {
Expect(ApplicationSetDoesNotHaveApplicationStatus()).
// Cleanup
When().
Delete(metav1.DeletePropagationForeground).
Delete().
Then().
Expect(ApplicationsDoNotExist(expectedApps))
}
func TestProgressiveSyncMultipleAppsPerStepWithReverseDeletionOrder(t *testing.T) {
func TestProgressiveSyncMultipleAppsPerStep(t *testing.T) {
if os.Getenv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS") != "true" {
t.Skip("Skipping progressive sync tests - ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS not enabled")
}
// Define app groups by step (for reverse deletion: prod -> staging -> dev)
prodApps := []string{"prog-ship", "prog-run"}
stagingApps := []string{"prog-verify", "prog-validate"}
devApps := []string{"prog-sketch", "prog-build"}
testFinalizer := "test.e2e.argoproj.io/wait-for-verification"
// Create expected app definitions for existence checks
expectedProdApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "ship", "prod", testFinalizer),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "run", "prod", testFinalizer),
expectedApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "sketch", "dev"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "build", "dev"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "verify", "staging"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "validate", "staging"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "ship", "prod"),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/prod/", "run", "prod"),
}
expectedStagingApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "verify", "staging", testFinalizer),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/staging/", "validate", "staging", testFinalizer),
}
expectedDevApps := []v1alpha1.Application{
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "sketch", "dev", testFinalizer),
generateExpectedApp("prog-", "progressive-sync/multiple-apps-in-step/dev/", "build", "dev", testFinalizer),
}
var allExpectedApps []v1alpha1.Application
allExpectedApps = append(allExpectedApps, expectedProdApps...)
allExpectedApps = append(allExpectedApps, expectedStagingApps...)
allExpectedApps = append(allExpectedApps, expectedDevApps...)
Given(t).
When().
Create(appSetWithReverseDeletionOrder).
Create(appSetWithMultipleAppsInEachStep).
Then().
And(func() {
t.Log("ApplicationSet with reverse deletion order created")
}).
Expect(ApplicationsExist(allExpectedApps)).
Expect(ApplicationsExist(expectedApps)).
Expect(CheckApplicationInRightSteps("1", []string{"prog-sketch", "prog-build"})).
Expect(CheckApplicationInRightSteps("2", []string{"prog-verify", "prog-validate"})).
Expect(CheckApplicationInRightSteps("3", []string{"prog-ship", "prog-run"})).
ExpectWithDuration(ApplicationSetHasApplicationStatus(6), TransitionTimeout).
And(func() {
t.Log("All 6 applications exist and are tracked in ApplicationSet status")
}).
// Delete the ApplicationSet
// Cleanup
When().
Delete(metav1.DeletePropagationBackground).
Delete().
Then().
And(func() {
t.Log("Starting deletion - should happen in reverse order: prod -> staging -> dev")
t.Log("Wave 1: Verifying prod apps (prog-ship, prog-run) are deleted first")
}).
// Wave 1: Prod apps should be deleted first, others untouched
Expect(ApplicationDeletionStarted(prodApps)).
Expect(ApplicationsExistAndNotBeingDeleted(append(stagingApps, devApps...))).
And(func() {
t.Log("Wave 1 confirmed: prod apps deleting/gone, staging and dev apps still exist and not being deleted")
}).
When().
RemoveFinalizerFromApps(prodApps, testFinalizer).
Then().
And(func() {
t.Log("removed finalizer from prod apps, confirm prod apps deleted")
t.Log("Wave 2: Verifying staging apps (prog-verify, prog-validate) are deleted second")
}).
// Wave 2: Staging apps being deleted, dev untouched
ExpectWithDuration(ApplicationsDoNotExist(expectedProdApps), TransitionTimeout).
Expect(ApplicationDeletionStarted(stagingApps)).
Expect(ApplicationsExistAndNotBeingDeleted(devApps)).
And(func() {
t.Log("Wave 2 confirmed: prod apps gone, staging apps deleting/gone, dev apps still exist and not being deleted")
}).
When().
RemoveFinalizerFromApps(stagingApps, testFinalizer).
Then().
And(func() {
t.Log("removed finalizer from staging apps, confirm staging apps deleted")
t.Log("Wave 3: Verifying dev apps (prog-sketch, prog-build) are deleted last")
}).
// Wave 3: Dev apps deleted last
ExpectWithDuration(ApplicationsDoNotExist(expectedStagingApps), TransitionTimeout).
Expect(ApplicationDeletionStarted(devApps)).
And(func() {
t.Log("Wave 3 confirmed: all prod and staging apps gone, dev apps deleting/gone")
}).
When().
RemoveFinalizerFromApps(devApps, testFinalizer).
Then().
And(func() {
t.Log("removed finalizer from dev apps, confirm dev apps deleted")
t.Log("Waiting for final cleanup - all applications should be deleted")
}).
// Final: All applications should be gone
ExpectWithDuration(ApplicationsDoNotExist(allExpectedApps), time.Minute).
And(func() {
t.Log("Reverse deletion order verified successfully!")
t.Log("Deletion sequence was: prod -> staging -> dev")
})
Expect(ApplicationsDoNotExist(expectedApps))
}
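For context, the RollingSync strategy used by these fixtures groups the generated Applications into ordered steps via label match expressions. Below is a minimal sketch of what a three-step helper along the lines of generateStandardRolloutSyncSteps could look like; the helper name and the exact v1alpha1 field names are assumptions based on the ApplicationSet API referenced above, not code taken from this diff.

func exampleRolloutSyncSteps() []v1alpha1.ApplicationSetRolloutStep {
	steps := make([]v1alpha1.ApplicationSetRolloutStep, 0, 3)
	for _, env := range []string{"dev", "staging", "prod"} {
		steps = append(steps, v1alpha1.ApplicationSetRolloutStep{
			// Each step selects the generated Applications whose
			// "environment" label matches the current stage.
			MatchExpressions: []v1alpha1.ApplicationMatchExpression{
				{Key: "environment", Operator: "In", Values: []string{env}},
			},
		})
	}
	return steps
}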
var appSetInvalidStepConfiguration = v1alpha1.ApplicationSet{
@@ -626,13 +556,9 @@ var appSetWithEmptyGenerator = v1alpha1.ApplicationSet{
},
}
var appSetWithReverseDeletionOrder = v1alpha1.ApplicationSet{
var appSetWithMultipleAppsInEachStep = v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "appset-reverse-deletion-order",
},
TypeMeta: metav1.TypeMeta{
Kind: "ApplicationSet",
APIVersion: "argoproj.io/v1alpha1",
Name: "progressive-sync-multi-apps",
},
Spec: v1alpha1.ApplicationSetSpec{
GoTemplate: true,
@@ -643,10 +569,6 @@ var appSetWithReverseDeletionOrder = v1alpha1.ApplicationSet{
Labels: map[string]string{
"environment": "{{.environment}}",
},
Finalizers: []string{
"resources-finalizer.argocd.argoproj.io",
"test.e2e.argoproj.io/wait-for-verification",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",
@@ -683,18 +605,11 @@ var appSetWithReverseDeletionOrder = v1alpha1.ApplicationSet{
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: generateStandardRolloutSyncSteps(),
},
DeletionOrder: "Reverse",
},
},
}
func generateExpectedApp(prefix string, path string, name string, envVar string, testFinalizer string) v1alpha1.Application {
finalizers := []string{
"resources-finalizer.argocd.argoproj.io",
}
if testFinalizer != "" {
finalizers = append(finalizers, testFinalizer)
}
func generateExpectedApp(prefix string, path string, name string, envVar string) v1alpha1.Application {
return v1alpha1.Application{
TypeMeta: metav1.TypeMeta{
Kind: "Application",
@@ -706,7 +621,9 @@ func generateExpectedApp(prefix string, path string, name string, envVar string,
Labels: map[string]string{
"environment": envVar,
},
Finalizers: finalizers,
Finalizers: []string{
"resources-finalizer.argocd.argoproj.io",
},
},
Spec: v1alpha1.ApplicationSpec{
Project: "default",

View File

@@ -147,7 +147,7 @@ func TestSimpleListGeneratorExternalNamespace(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
}
func TestSimpleListGeneratorExternalNamespaceNoConflict(t *testing.T) {
@@ -325,13 +325,13 @@ func TestSimpleListGeneratorExternalNamespaceNoConflict(t *testing.T) {
Then().
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata})).
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata})).
When().
SwitchToExternalNamespace(utils.ArgoCDExternalNamespace2).
Then().
Expect(ApplicationsExist([]v1alpha1.Application{expectedAppExternalNamespace2})).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppExternalNamespace2}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppExternalNamespace2}))
}
func TestSimpleListGenerator(t *testing.T) {
@@ -420,7 +420,7 @@ func TestSimpleListGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
}
func TestSimpleListGeneratorGoTemplate(t *testing.T) {
@@ -509,7 +509,7 @@ func TestSimpleListGeneratorGoTemplate(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
}
func TestRenderHelmValuesObject(t *testing.T) {
@@ -581,7 +581,7 @@ func TestRenderHelmValuesObject(t *testing.T) {
}).Then().Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
}
func TestTemplatePatch(t *testing.T) {
@@ -705,7 +705,7 @@ func TestTemplatePatch(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewMetadata}))
}
func TestUpdateHelmValuesObject(t *testing.T) {
@@ -787,7 +787,7 @@ func TestUpdateHelmValuesObject(t *testing.T) {
Expect(ApplicationsExist([]v1alpha1.Application{expectedApp})).
When().
// Delete the ApplicationSet, and verify it deletes the Applications
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
}
func TestSyncPolicyCreateUpdate(t *testing.T) {
@@ -898,7 +898,7 @@ func TestSyncPolicyCreateUpdate(t *testing.T) {
// As the policy is create-update, the AppSet controller removes the ownerReferences from all generated Applications when the AppSet is deleted.
// So the AppSet deletion goes through, but all the Applications it generated still exist.
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata}))
Delete().Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewMetadata}))
}
func TestSyncPolicyCreateDelete(t *testing.T) {
@@ -994,7 +994,7 @@ func TestSyncPolicyCreateDelete(t *testing.T) {
// Delete the ApplicationSet
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
}
func TestSyncPolicyCreateOnly(t *testing.T) {
@@ -1095,7 +1095,7 @@ func TestSyncPolicyCreateOnly(t *testing.T) {
// As the policy is create-only, the AppSet controller removes the ownerReferences from all generated Applications when the AppSet is deleted.
// So the AppSet deletion goes through, but all the Applications it generated still exist.
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsExist([]v1alpha1.Application{*expectedAppNewNamespace}))
}
func githubSCMMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
@@ -1582,7 +1582,7 @@ func TestCustomApplicationFinalizers(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
}
func TestCustomApplicationFinalizersGoTemplate(t *testing.T) {
@@ -1647,7 +1647,7 @@ func TestCustomApplicationFinalizersGoTemplate(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedApp}))
}
func githubPullMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
@@ -2174,7 +2174,7 @@ func TestApplicationSetAPIListResourceEvents(t *testing.T) {
// Events list should be returned (may be empty if no events have been generated yet)
assert.NotNil(t, events)
}).
When().Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{}))
When().Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{}))
}
// TestApplicationSetHealthStatusCLI tests that the CLI commands display the health status field for an ApplicationSet.

View File

@@ -108,7 +108,7 @@ func TestSimpleClusterGeneratorExternalNamespace(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
}
func TestSimpleClusterGenerator(t *testing.T) {
@@ -199,7 +199,7 @@ func TestSimpleClusterGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
}
func TestClusterGeneratorWithLocalCluster(t *testing.T) {
@@ -311,7 +311,7 @@ func TestClusterGeneratorWithLocalCluster(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
})
}
}
@@ -392,7 +392,7 @@ func TestSimpleClusterGeneratorAddingCluster(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1, expectedAppCluster2}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1, expectedAppCluster2}))
}
func TestSimpleClusterGeneratorDeletingCluster(t *testing.T) {
@@ -473,7 +473,7 @@ func TestSimpleClusterGeneratorDeletingCluster(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1}))
}
func TestClusterGeneratorWithFlatListMode(t *testing.T) {
@@ -574,5 +574,5 @@ func TestClusterGeneratorWithFlatListMode(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster2}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster2}))
}

View File

@@ -119,7 +119,7 @@ func TestSimpleClusterDecisionResourceGeneratorExternalNamespace(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
}
func TestSimpleClusterDecisionResourceGenerator(t *testing.T) {
@@ -218,7 +218,7 @@ func TestSimpleClusterDecisionResourceGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{*expectedAppNewNamespace}))
}
func TestSimpleClusterDecisionResourceGeneratorAddingCluster(t *testing.T) {
@@ -310,7 +310,7 @@ func TestSimpleClusterDecisionResourceGeneratorAddingCluster(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1, expectedAppCluster2}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1, expectedAppCluster2}))
}
func TestSimpleClusterDecisionResourceGeneratorDeletingClusterSecret(t *testing.T) {
@@ -404,7 +404,7 @@ func TestSimpleClusterDecisionResourceGeneratorDeletingClusterSecret(t *testing.
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1}))
}
func TestSimpleClusterDecisionResourceGeneratorDeletingClusterFromResource(t *testing.T) {
@@ -505,5 +505,5 @@ func TestSimpleClusterDecisionResourceGeneratorDeletingClusterFromResource(t *te
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1}))
Delete().Then().Expect(ApplicationsDoNotExist([]v1alpha1.Application{expectedAppCluster1}))
}

View File

@@ -5,7 +5,7 @@ import (
"strings"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
yaml "go.yaml.in/yaml/v3"
yaml "gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

View File

@@ -7,9 +7,6 @@ import (
"strings"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"github.com/argoproj/argo-cd/v3/test/e2e/fixture"
log "github.com/sirupsen/logrus"
@@ -394,7 +391,7 @@ func (a *Actions) StatusUpdatePlacementDecision(placementDecisionName string, cl
}
// Delete deletes the ApplicationSet within the context
func (a *Actions) Delete(propagationPolicy metav1.DeletionPropagation) *Actions {
func (a *Actions) Delete() *Actions {
a.context.T().Helper()
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())
@@ -411,7 +408,9 @@ func (a *Actions) Delete(propagationPolicy metav1.DeletionPropagation) *Actions
} else {
appSetClientSet = fixtureClient.AppSetClientset
}
err := appSetClientSet.Delete(context.Background(), a.context.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
deleteProp := metav1.DeletePropagationForeground
err := appSetClientSet.Delete(context.Background(), a.context.GetName(), metav1.DeleteOptions{PropagationPolicy: &deleteProp})
a.describeAction = fmt.Sprintf("Deleting ApplicationSet '%s/%s' %v", a.context.namespace, a.context.GetName(), err)
a.lastOutput, a.lastError = "", err
a.verifyAction()
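The fixture change above stops taking the propagation policy as a parameter and always deletes the ApplicationSet with foreground propagation. As a minimal sketch (assumed helper, not part of this diff) of the underlying client-go call, the policy is passed by pointer in metav1.DeleteOptions; Foreground blocks the owner's deletion until its dependents (the generated Applications) are gone, while Background removes the owner first:

// appSetDeleter is a minimal local interface matching the Delete call used above.
type appSetDeleter interface {
	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
}

func deleteAppSet(ctx context.Context, c appSetDeleter, name string, policy metav1.DeletionPropagation) error {
	// PropagationPolicy takes a pointer to the chosen metav1.DeletionPropagation value.
	return c.Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &policy})
}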
@@ -567,48 +566,3 @@ func (a *Actions) AddSignedFile(fileName, fileContents string) *Actions {
fixture.AddSignedFile(a.context.T(), a.context.path+"/"+fileName, fileContents)
return a
}
func (a *Actions) RemoveFinalizerFromApps(appNames []string, finalizer string) *Actions {
a.context.T().Helper()
fixtureClient := utils.GetE2EFixtureK8sClient(a.context.T())
var namespace string
if a.context.switchToNamespace != "" {
namespace = string(a.context.switchToNamespace)
} else {
namespace = fixture.TestNamespace()
}
for _, appName := range appNames {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
app, err := fixtureClient.AppClientset.ArgoprojV1alpha1().Applications(namespace).Get(
a.context.T().Context(), appName, metav1.GetOptions{})
if err != nil {
return err
}
// Remove provided finalizer
finalizers := []string{}
for _, f := range app.Finalizers {
if f != finalizer {
finalizers = append(finalizers, f)
}
}
patch, _ := json.Marshal(map[string]any{
"metadata": map[string]any{
"finalizers": finalizers,
},
})
_, err = fixtureClient.AppClientset.ArgoprojV1alpha1().Applications(namespace).Patch(
a.context.T().Context(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
if err != nil {
return err
}
return nil
})
if err != nil {
a.lastError = err
}
}
a.describeAction = fmt.Sprintf("removing finalizer '%s' from apps %v", finalizer, appNames)
a.verifyAction()
return a
}

View File

@@ -336,41 +336,3 @@ func ApplicationSetHasApplicationStatus(expectedApplicationStatusLength int) Exp
return succeeded, fmt.Sprintf("All Applications in ApplicationSet: '%s' are Healthy ", c.context.GetName())
}
}
// ApplicationDeletionStarted verifies that at least one application from the provided list of appNames has its DeletionTimestamp set,
// indicating deletion has begun for this step. It returns failed if any application doesn't exist; it does not wait for deletion to complete.
func ApplicationDeletionStarted(appNames []string) Expectation {
return func(c *Consequences) (state, string) {
anyapp := false
for _, appName := range appNames {
app := c.app(appName)
if app == nil {
// with the test finalizer explicitly added, the application should not be fully deleted yet
return failed, fmt.Sprintf("no application found with name '%s'", appName)
}
if app.DeletionTimestamp != nil {
anyapp = true
}
}
if !anyapp {
return pending, "no app in this step is being deleted yet"
}
return succeeded, fmt.Sprintf("at least one app in %v is being deleted or gone", appNames)
}
}
// ApplicationsExistAndNotBeingDeleted checks that specified apps exist and do NOT have DeletionTimestamp set
func ApplicationsExistAndNotBeingDeleted(appNames []string) Expectation {
return func(c *Consequences) (state, string) {
for _, appName := range appNames {
app := c.app(appName)
if app == nil {
return failed, fmt.Sprintf("app '%s' does not exist but should", appName)
}
if app.DeletionTimestamp != nil {
return failed, fmt.Sprintf("app '%s' is being deleted but should not be yet", appName)
}
}
return succeeded, fmt.Sprintf("all apps %v exist and are not being deleted", appNames)
}
}
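These expectations follow a simple contract: a func over *Consequences that returns a state (pending, failed, or succeeded) plus a message. A minimal sketch of a custom expectation in the same style, using only the helpers visible above (c.app and the state constants):

func ApplicationsFullyDeleted(appNames []string) Expectation {
	return func(c *Consequences) (state, string) {
		for _, appName := range appNames {
			// Stay pending until every named app has actually disappeared.
			if c.app(appName) != nil {
				return pending, fmt.Sprintf("app '%s' still exists", appName)
			}
		}
		return succeeded, fmt.Sprintf("all apps %v are deleted", appNames)
	}
}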

View File

@@ -131,7 +131,7 @@ func TestListMatrixGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestClusterMatrixGenerator(t *testing.T) {
@@ -254,7 +254,7 @@ func TestClusterMatrixGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestMatrixTerminalMatrixGeneratorSelector(t *testing.T) {
@@ -392,7 +392,7 @@ func TestMatrixTerminalMatrixGeneratorSelector(t *testing.T) {
})
}).Then().Expect(ApplicationsExist(expectedApps)).Expect(ApplicationsDoNotExist(excludedApps)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(excludedApps)).Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(excludedApps)).Expect(ApplicationsDoNotExist(expectedApps))
}
func TestMatrixTerminalMergeGeneratorSelector(t *testing.T) {
@@ -530,5 +530,5 @@ func TestMatrixTerminalMergeGeneratorSelector(t *testing.T) {
})
}).Then().Expect(ApplicationsExist(expectedApps)).Expect(ApplicationsDoNotExist(excludedApps)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(excludedApps)).Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(excludedApps)).Expect(ApplicationsDoNotExist(expectedApps))
}

View File

@@ -130,7 +130,7 @@ func TestListMergeGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestClusterMergeGenerator(t *testing.T) {
@@ -271,7 +271,7 @@ func TestClusterMergeGenerator(t *testing.T) {
// Delete the ApplicationSet, and verify it deletes the Applications
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
Delete().Then().Expect(ApplicationsDoNotExist(expectedAppsNewNamespace))
}
func TestMergeTerminalMergeGeneratorSelector(t *testing.T) {
@@ -410,7 +410,7 @@ func TestMergeTerminalMergeGeneratorSelector(t *testing.T) {
})
}).Then().Expect(ApplicationsExist(expectedApps)).Expect(ApplicationsDoNotExist(excludedApps)).
When().
Delete(metav1.DeletePropagationForeground).Then().Expect(ApplicationsDoNotExist(excludedApps)).Expect(ApplicationsDoNotExist(expectedApps))
Delete().Then().Expect(ApplicationsDoNotExist(excludedApps)).Expect(ApplicationsDoNotExist(expectedApps))
}
func toAPIExtensionsJSON(t *testing.T, g any) *apiextensionsv1.JSON {

View File

@@ -1074,51 +1074,39 @@ type ClusterGetter interface {
GetClusterServersByName(ctx context.Context, server string) ([]string, error)
}
// GetDestinationServer resolves the cluster server URL for the given destination without
// fetching the full Cluster object. For server-based destinations the URL is returned
// directly (normalized). For name-based destinations GetClusterServersByName is called.
// An error is returned if the name is ambiguous or missing.
func GetDestinationServer(ctx context.Context, destination argoappv1.ApplicationDestination, db ClusterGetter) (string, error) {
if destination.Name != "" && destination.Server != "" {
return "", fmt.Errorf("application destination can't have both name and server defined: %s %s", destination.Name, destination.Server)
}
if destination.Server != "" {
return strings.TrimRight(destination.Server, "/"), nil
}
if destination.Name != "" {
clusterURLs, err := db.GetClusterServersByName(ctx, destination.Name)
if err != nil {
return "", fmt.Errorf("error getting cluster by name %q: %w", destination.Name, err)
}
if len(clusterURLs) == 0 {
return "", fmt.Errorf("there are no clusters with this name: %s", destination.Name)
}
if len(clusterURLs) > 1 {
return "", fmt.Errorf("there are %d clusters with the same name: [%s]", len(clusterURLs), strings.Join(clusterURLs, " "))
}
return clusterURLs[0], nil
}
// nolint:staticcheck // Error constant is very old, shouldn't lowercase the first letter.
return "", errors.New(ErrDestinationMissing)
}
// GetDestinationCluster returns the cluster object based on the destination server or name. If both are provided or
// both are empty, an error is returned. If the destination server is provided, the cluster is fetched by the server
// URL. If the destination name is provided, the cluster is fetched by the name. If multiple clusters have the specified
// name, an error is returned.
func GetDestinationCluster(ctx context.Context, destination argoappv1.ApplicationDestination, db ClusterGetter) (*argoappv1.Cluster, error) {
server, err := GetDestinationServer(ctx, destination, db)
if err != nil {
return nil, err
if destination.Name != "" && destination.Server != "" {
return nil, fmt.Errorf("application destination can't have both name and server defined: %s %s", destination.Name, destination.Server)
}
cluster, err := db.GetCluster(ctx, server)
if err != nil {
if destination.Server != "" {
if destination.Server != "" {
cluster, err := db.GetCluster(ctx, destination.Server)
if err != nil {
return nil, fmt.Errorf("error getting cluster by server %q: %w", destination.Server, err)
}
return nil, fmt.Errorf("error getting cluster by URL: %w", err)
return cluster, nil
} else if destination.Name != "" {
clusterURLs, err := db.GetClusterServersByName(ctx, destination.Name)
if err != nil {
return nil, fmt.Errorf("error getting cluster by name %q: %w", destination.Name, err)
}
if len(clusterURLs) == 0 {
return nil, fmt.Errorf("there are no clusters with this name: %s", destination.Name)
}
if len(clusterURLs) > 1 {
return nil, fmt.Errorf("there are %d clusters with the same name: [%s]", len(clusterURLs), strings.Join(clusterURLs, " "))
}
cluster, err := db.GetCluster(ctx, clusterURLs[0])
if err != nil {
return nil, fmt.Errorf("error getting cluster by URL: %w", err)
}
return cluster, nil
}
return cluster, nil
// nolint:staticcheck // Error constant is very old, shouldn't lowercase the first letter.
return nil, errors.New(ErrDestinationMissing)
}
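On the side of this diff that introduces GetDestinationServer, URL resolution is split out of cluster fetching, so callers that only need the server string can skip loading the Cluster object. A minimal usage sketch (assumed caller, not from this diff), relying only on the ClusterGetter methods shown above:

func resolveDestination(ctx context.Context, db ClusterGetter, dest argoappv1.ApplicationDestination) (*argoappv1.Cluster, error) {
	// Resolve name- or server-based destinations to a normalized server URL first.
	server, err := GetDestinationServer(ctx, dest, db)
	if err != nil {
		return nil, fmt.Errorf("resolving destination: %w", err)
	}
	// Then fetch the full Cluster object only when it is actually needed.
	return db.GetCluster(ctx, server)
}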
func GetGlobalProjects(proj *argoappv1.AppProject, projLister applicationsv1.AppProjectLister, settingsManager *settings.SettingsManager) []*argoappv1.AppProject {

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"maps"
"slices"
"strconv"
"strings"
"sync"
@@ -25,13 +24,9 @@ import (
"github.com/argoproj/argo-cd/v3/util/settings"
)
const (
errCheckingInClusterEnabled = "%s: error checking if in-cluster is enabled: %v"
)
var (
localCluster = appv1.Cluster{
Name: appv1.KubernetesInClusterName,
Name: "in-cluster",
Server: appv1.KubernetesInternalAPIServerAddr,
Info: appv1.ClusterInfo{
ConnectionState: appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful},
@@ -73,10 +68,11 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
clusterList := appv1.ClusterList{
Items: make([]appv1.Cluster, 0),
}
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
settings, err := db.settingsMgr.GetSettings()
if err != nil {
log.Warnf(errCheckingInClusterEnabled, "ListClusters", err)
return nil, err
}
inClusterEnabled := settings.InClusterEnabled
hasInClusterCredentials := false
for _, clusterSecret := range clusterSecrets {
cluster, err := SecretToCluster(clusterSecret)
@@ -102,11 +98,11 @@ func (db *db) ListClusters(_ context.Context) (*appv1.ClusterList, error) {
// CreateCluster creates a cluster
func (db *db) CreateCluster(ctx context.Context, c *appv1.Cluster) (*appv1.Cluster, error) {
if c.Server == appv1.KubernetesInternalAPIServerAddr {
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
settings, err := db.settingsMgr.GetSettings()
if err != nil {
log.Warnf(errCheckingInClusterEnabled, "CreateCluster", err)
return nil, err
}
if !inClusterEnabled {
if !settings.InClusterEnabled {
return nil, status.Errorf(codes.InvalidArgument, "cannot register cluster: in-cluster has been disabled")
}
}
@@ -152,12 +148,13 @@ func (db *db) WatchClusters(ctx context.Context,
handleModEvent func(oldCluster *appv1.Cluster, newCluster *appv1.Cluster),
handleDeleteEvent func(clusterServer string),
) error {
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
log.Warnf(errCheckingInClusterEnabled, "WatchClusters", err)
return err
}
localCls := db.getLocalCluster()
if inClusterEnabled {
if argoSettings.InClusterEnabled {
localCls, err = db.GetCluster(ctx, appv1.KubernetesInternalAPIServerAddr)
if err != nil {
return fmt.Errorf("could not get local cluster: %w", err)
@@ -176,7 +173,7 @@ func (db *db) WatchClusters(ctx context.Context,
return
}
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
if inClusterEnabled {
if argoSettings.InClusterEnabled {
// change local cluster event to modified, since it cannot be added at runtime
handleModEvent(localCls, cluster)
localCls = cluster
@@ -204,7 +201,7 @@ func (db *db) WatchClusters(ctx context.Context,
},
func(secret *corev1.Secret) {
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr && inClusterEnabled {
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr && argoSettings.InClusterEnabled {
// change local cluster event to modified, since it cannot be deleted at runtime, unless disabled.
newLocalCls := db.getLocalCluster()
handleModEvent(localCls, newLocalCls)
@@ -234,16 +231,13 @@ func (db *db) getClusterSecret(server string) (*corev1.Secret, error) {
// GetCluster returns a cluster from a query
func (db *db) GetCluster(_ context.Context, server string) (*appv1.Cluster, error) {
informer, err := db.settingsMgr.GetClusterInformer()
if err != nil {
return nil, fmt.Errorf("failed to get cluster informer: %w", err)
}
informer := db.settingsMgr.GetClusterInformer()
if server == appv1.KubernetesInternalAPIServerAddr {
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
log.Warnf(errCheckingInClusterEnabled, "GetCluster", err)
return nil, err
}
if !inClusterEnabled {
if !argoSettings.InClusterEnabled {
return nil, status.Errorf(codes.NotFound, "cluster %q is disabled", server)
}
@@ -288,32 +282,24 @@ func (db *db) GetProjectClusters(_ context.Context, project string) ([]*appv1.Cl
}
func (db *db) GetClusterServersByName(_ context.Context, name string) ([]string, error) {
informer, err := db.settingsMgr.GetClusterInformer()
argoSettings, err := db.settingsMgr.GetSettings()
if err != nil {
return nil, fmt.Errorf("failed to get cluster informer: %w", err)
return nil, err
}
informer := db.settingsMgr.GetClusterInformer()
servers, err := informer.GetClusterServersByName(name)
if err != nil {
return nil, err
}
// attempt to short circuit if the in-cluster name is not involved
if name != appv1.KubernetesInClusterName && !slices.Contains(servers, appv1.KubernetesInternalAPIServerAddr) {
return servers, nil
}
inClusterEnabled, err := db.settingsMgr.IsInClusterEnabled()
if err != nil {
return nil, fmt.Errorf(errCheckingInClusterEnabled, "GetClusterServersByName", err)
}
// Handle local cluster special case
if len(servers) == 0 && name == appv1.KubernetesInClusterName && inClusterEnabled {
if len(servers) == 0 && name == "in-cluster" && argoSettings.InClusterEnabled {
return []string{appv1.KubernetesInternalAPIServerAddr}, nil
}
// Filter out disabled in-cluster
if !inClusterEnabled {
if !argoSettings.InClusterEnabled {
filtered := make([]string, 0, len(servers))
for _, s := range servers {
if s != appv1.KubernetesInternalAPIServerAddr {

View File

@@ -129,44 +129,6 @@ func TestWatchClusters_LocalClusterModifications(t *testing.T) {
assert.True(t, completed, "Failed due to timeout")
}
func TestWatchClusters_MissingServerSecretKey(t *testing.T) {
// !race:
// Intermittent failure when running with -race, likely due to race condition
// https://github.com/argoproj/argo-cd/issues/4755
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string]string{},
}
argoCDSecretWithoutSecretKey := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string][]byte{
"admin.password": nil,
},
}
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
db := NewDB(fakeNamespace, settingsManager, kubeclientset)
completed := runWatchTest(t, db, []func(old *v1alpha1.Cluster, new *v1alpha1.Cluster){
func(old *v1alpha1.Cluster, new *v1alpha1.Cluster) {
assert.Nil(t, old)
assert.Equal(t, v1alpha1.KubernetesInternalAPIServerAddr, new.Server)
},
})
assert.True(t, completed, "WatchClusters should work even when server.secretkey is missing")
}
func TestWatchClusters_LocalClusterModificationsWhenDisabled(t *testing.T) {
// !race:
// Intermittent failure when running TestWatchClusters_LocalClusterModifications with -race, likely due to race condition

View File

@@ -661,225 +661,6 @@ func TestGetClusterServersByName(t *testing.T) {
})
}
func TestGetClusterServersByName_IsInClusterEnabledLazyLoad(t *testing.T) {
argoCDSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{"app.kubernetes.io/part-of": "argocd"},
},
Data: map[string][]byte{
"admin.password": nil,
"server.secretkey": nil,
},
}
prodSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-secret",
Namespace: fakeNamespace,
Labels: map[string]string{common.LabelKeySecretType: common.LabelValueSecretTypeCluster},
Annotations: map[string]string{
common.AnnotationKeyManagedBy: common.AnnotationValueManagedByArgoCD,
},
},
Data: map[string][]byte{
"name": []byte("prod"),
"server": []byte("https://prod.example.com"),
"config": []byte("{}"),
},
}
tests := []struct {
name string
clusterName string
wantErr bool
wantServers []string
}{
{
name: "non in-cluster name does not call IsInClusterEnabled()",
clusterName: "prod",
wantErr: false,
wantServers: []string{"https://prod.example.com"},
},
{
name: "in-cluster name calls IsInClusterEnabled()",
clusterName: "in-cluster",
wantErr: true,
},
}
// argocd-cm is intentionally absent: IsInClusterEnabled() fails if called.
kubeclientset := fake.NewClientset(argoCDSecret, prodSecret)
db := NewDB(fakeNamespace, settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace), kubeclientset)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
servers, err := db.GetClusterServersByName(t.Context(), tt.clusterName)
if tt.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
assert.ElementsMatch(t, tt.wantServers, servers)
}
})
}
}
func TestCreateCluster_MissingServerSecretKey(t *testing.T) {
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string]string{},
}
argoCDSecretWithoutSecretKey := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string][]byte{
"admin.password": nil,
},
}
t.Run("in-cluster creation succeeds when server.secretkey is missing", func(t *testing.T) {
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
db := NewDB(fakeNamespace, settingsManager, kubeclientset)
_, err := db.CreateCluster(t.Context(), &v1alpha1.Cluster{
Server: v1alpha1.KubernetesInternalAPIServerAddr,
Name: "in-cluster",
})
require.NoError(t, err)
})
t.Run("external cluster creation succeeds when server.secretkey is missing", func(t *testing.T) {
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
db := NewDB(fakeNamespace, settingsManager, kubeclientset)
_, err := db.CreateCluster(t.Context(), &v1alpha1.Cluster{
Server: "https://my-external-cluster",
Name: "external",
})
require.NoError(t, err)
})
t.Run("in-cluster creation rejected when explicitly disabled even with missing server.secretkey", func(t *testing.T) {
argoCDConfigMapWithInClusterDisabled := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string]string{"cluster.inClusterEnabled": "false"},
}
kubeclientset := fake.NewClientset(argoCDConfigMapWithInClusterDisabled, argoCDSecretWithoutSecretKey)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
db := NewDB(fakeNamespace, settingsManager, kubeclientset)
_, err := db.CreateCluster(t.Context(), &v1alpha1.Cluster{
Server: v1alpha1.KubernetesInternalAPIServerAddr,
Name: "in-cluster",
})
require.Error(t, err)
require.Contains(t, err.Error(), "in-cluster has been disabled")
})
}
func TestListClusters_MissingServerSecretKey(t *testing.T) {
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string]string{},
}
argoCDSecretWithoutSecretKey := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string][]byte{
"admin.password": nil,
},
}
t.Run("lists clusters including implicit in-cluster when server.secretkey is missing", func(t *testing.T) {
externalClusterSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "mycluster",
Namespace: fakeNamespace,
Labels: map[string]string{
common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
},
},
Data: map[string][]byte{
"server": []byte("https://my-external-cluster"),
"name": []byte("external"),
},
}
kubeclientset := fake.NewClientset(externalClusterSecret, emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
db := NewDB(fakeNamespace, settingsManager, kubeclientset)
clusters, err := db.ListClusters(t.Context())
require.NoError(t, err)
require.Len(t, clusters.Items, 2)
})
}
func TestGetClusterServersByName_MissingServerSecretKey(t *testing.T) {
emptyArgoCDConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDConfigMapName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string]string{},
}
argoCDSecretWithoutSecretKey := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: common.ArgoCDSecretName,
Namespace: fakeNamespace,
Labels: map[string]string{
"app.kubernetes.io/part-of": "argocd",
},
},
Data: map[string][]byte{
"admin.password": nil,
},
}
t.Run("returns in-cluster when server.secretkey is missing", func(t *testing.T) {
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecretWithoutSecretKey)
settingsManager := settings.NewSettingsManager(t.Context(), kubeclientset, fakeNamespace)
db := NewDB(fakeNamespace, settingsManager, kubeclientset)
servers, err := db.GetClusterServersByName(t.Context(), "in-cluster")
require.NoError(t, err)
require.ElementsMatch(t, []string{v1alpha1.KubernetesInternalAPIServerAddr}, servers)
})
}
// TestClusterRaceConditionClusterSecrets reproduces a race condition
// on the cluster secrets. The test doesn't assert anything because,
// before the fix, it would panic with a concurrent map iteration and map write

View File

@@ -373,7 +373,6 @@ func (c *nativeHelmChart) loadRepoIndex(ctx context.Context, maxIndexSize int64)
Proxy: proxy.GetCallback(c.proxy, c.noProxy),
TLSClientConfig: tlsConf,
DisableKeepAlives: true,
ForceAttemptHTTP2: true,
}
client := http.Client{Transport: tr}
resp, err := client.Do(req)
@@ -493,7 +492,6 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) ([]string, error)
Proxy: proxy.GetCallback(c.proxy, c.noProxy),
TLSClientConfig: tlsConf,
DisableKeepAlives: true,
ForceAttemptHTTP2: true,
}
// Wrap transport to add User-Agent header to all requests
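The ForceAttemptHTTP2 lines touched above matter because Go's net/http only sets up HTTP/2 automatically when the Transport has no custom TLSClientConfig; once one is supplied (as these Helm clients do), the client stays on HTTP/1.1 unless the flag is set. A minimal sketch of the general pattern using only the standard library (crypto/tls, net/http):

func newHTTP2CapableClient(tlsConf *tls.Config) *http.Client {
	return &http.Client{Transport: &http.Transport{
		TLSClientConfig:   tlsConf,
		DisableKeepAlives: true,
		// A non-nil TLSClientConfig disables Go's automatic HTTP/2 setup,
		// so ForceAttemptHTTP2 is required to negotiate h2 over ALPN.
		ForceAttemptHTTP2: true,
	}}
}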

View File

@@ -2,7 +2,6 @@ package helm
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"math"
@@ -11,7 +10,6 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"testing"
@@ -576,68 +574,6 @@ func TestGetTagsCaching(t *testing.T) {
})
}
func TestGetTagsUsesHTTP2(t *testing.T) {
t.Run("should negotiate HTTP/2 when TLS is configured", func(t *testing.T) {
var requestProtos []string
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestProtos = append(requestProtos, r.Proto)
t.Logf("called %s with proto %s", r.URL.Path, r.Proto)
responseTags := fakeTagsList{
Tags: []string{"1.0.0"},
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
require.NoError(t, json.NewEncoder(w).Encode(responseTags))
}))
// httptest.NewTLSServer only advertises http/1.1 in ALPN, so we must
// configure the server to also offer h2 for HTTP/2 negotiation to work.
server.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
server.StartTLS()
t.Cleanup(server.Close)
client := NewClient(server.URL, HelmCreds{InsecureSkipVerify: true}, true, "", "")
tags, err := client.GetTags("mychart", true)
require.NoError(t, err)
assert.Equal(t, []string{"1.0.0"}, tags)
// Verify that at least one request used HTTP/2. When ForceAttemptHTTP2 is
// not set on the Transport, Go's TLS stack won't negotiate h2 even though
// the server supports it, because a custom TLSClientConfig disables the
// automatic HTTP/2 setup.
require.NotEmpty(t, requestProtos, "expected at least one request to the server")
hasHTTP2 := slices.Contains(requestProtos, "HTTP/2.0")
assert.True(t, hasHTTP2, "expected at least one HTTP/2 request, but got protocols: %v", requestProtos)
})
}
func TestLoadRepoIndexUsesHTTP2(t *testing.T) {
t.Run("should negotiate HTTP/2 when fetching index", func(t *testing.T) {
var requestProto string
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestProto = r.Proto
t.Logf("called %s with proto %s", r.URL.Path, r.Proto)
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(`apiVersion: v1
entries: {}
`))
}))
server.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
server.StartTLS()
t.Cleanup(server.Close)
client := NewClient(server.URL, HelmCreds{InsecureSkipVerify: true}, false, "", "")
_, err := client.GetIndex(false, 10000)
require.NoError(t, err)
assert.Equal(t, "HTTP/2.0", requestProto, "expected HTTP/2 request for index fetch, but got %s", requestProto)
})
}
func TestUserAgentIsSet(t *testing.T) {
t.Run("Default User-Agent for traditional Helm repo", func(t *testing.T) {
// Create a test server that captures the User-Agent header

View File

@@ -327,12 +327,8 @@ func (c *Cmd) PullOCI(repo string, chart string, version string, destination str
return out, nil
}
func (c *Cmd) dependencyBuild(insecure bool) (string, error) {
args := []string{"dependency", "build"}
if insecure {
args = append(args, "--insecure-skip-tls-verify")
}
out, _, err := c.run(context.Background(), args...)
func (c *Cmd) dependencyBuild() (string, error) {
out, _, err := c.run(context.Background(), "dependency", "build")
if err != nil {
return "", fmt.Errorf("failed to build dependencies: %w", err)
}

View File

@@ -135,36 +135,6 @@ func TestRegistryLogin(t *testing.T) {
}
}
func TestDependencyBuild(t *testing.T) {
tests := []struct {
name string
insecure bool
expectedOut string
}{
{
name: "without insecure",
insecure: false,
expectedOut: "helm dependency build",
},
{
name: "with insecure",
insecure: true,
expectedOut: "helm dependency build --insecure-skip-tls-verify",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
c, err := newCmdWithVersion(".", false, "", "", func(cmd *exec.Cmd, _ func(_ string) string) (string, error) {
return strings.Join(cmd.Args, " "), nil
})
require.NoError(t, err)
out, err := c.dependencyBuild(tc.insecure)
require.NoError(t, err)
assert.Equal(t, tc.expectedOut, out)
})
}
}
func TestRegistryLogout(t *testing.T) {
tests := []struct {
name string

View File

@@ -43,21 +43,20 @@ type Helm interface {
}
// NewHelmApp create a new wrapper to run commands on the `helm` command-line tool.
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool, insecure bool) (Helm, error) {
func NewHelmApp(workDir string, repos []HelmRepository, isLocal bool, version string, proxy string, noProxy string, passCredentials bool) (Helm, error) {
cmd, err := NewCmd(workDir, version, proxy, noProxy)
if err != nil {
return nil, fmt.Errorf("failed to create new helm command: %w", err)
}
cmd.IsLocal = isLocal
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials, insecure: insecure}, nil
return &helm{repos: repos, cmd: *cmd, passCredentials: passCredentials}, nil
}
type helm struct {
cmd Cmd
repos []HelmRepository
passCredentials bool
insecure bool
}
var _ Helm = &helm{}
@@ -109,7 +108,7 @@ func (h *helm) DependencyBuild() error {
}
}
h.repos = nil
_, err := h.cmd.dependencyBuild(h.insecure)
_, err := h.cmd.dependencyBuild()
if err != nil {
return fmt.Errorf("failed to build helm dependencies: %w", err)
}
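On the side of this diff that adds the trailing insecure parameter, callers thread the flag through NewHelmApp so DependencyBuild can append --insecure-skip-tls-verify to `helm dependency build`. A minimal usage sketch (assumed call site, not taken from this diff); the positional booleans follow the signature shown above (passCredentials=false, insecure=true):

func buildChartDeps(workDir string) error {
	h, err := NewHelmApp(workDir, nil, false, "", "", "", false, true)
	if err != nil {
		return fmt.Errorf("failed to create helm app: %w", err)
	}
	return h.DependencyBuild()
}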

View File

@@ -25,7 +25,7 @@ func template(h Helm, opts *TemplateOpts) ([]*unstructured.Unstructured, error)
}
func TestHelmTemplateParams(t *testing.T) {
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/minio", []HelmRepository{}, false, "", "", "", false)
require.NoError(t, err)
opts := TemplateOpts{
Name: "test",
@@ -58,7 +58,7 @@ func TestHelmTemplateValues(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false, false)
h, err := NewHelmApp(repoRootAbs, []HelmRepository{}, false, "", "", "", false)
require.NoError(t, err)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHelmGetParams(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
require.NoError(t, err)
params, err := h.GetParameters(nil, repoRootAbs, repoRootAbs)
require.NoError(t, err)
@@ -97,7 +97,7 @@ func TestHelmGetParamsValueFiles(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
require.NoError(t, err)
valuesPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-production.yaml", nil)
require.NoError(t, err)
@@ -112,7 +112,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
repoRoot := "./testdata/redis"
repoRootAbs, err := filepath.Abs(repoRoot)
require.NoError(t, err)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false, false)
h, err := NewHelmApp(repoRootAbs, nil, false, "", "", "", false)
require.NoError(t, err)
valuesMissingPath, _, err := path.ResolveValueFilePathOrUrl(repoRootAbs, repoRootAbs, "values-missing.yaml", nil)
require.NoError(t, err)
@@ -126,7 +126,7 @@ func TestHelmGetParamsValueFilesThatExist(t *testing.T) {
}
func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{Name: "my-release"})
@@ -144,7 +144,7 @@ func TestHelmTemplateReleaseNameOverwrite(t *testing.T) {
}
func TestHelmTemplateReleaseName(t *testing.T) {
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/redis", nil, false, "", "", "", false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{Name: "test"})
require.NoError(t, err)
@@ -206,7 +206,7 @@ func Test_flatVals(t *testing.T) {
}
func TestAPIVersions(t *testing.T) {
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/api-versions", nil, false, "", "", "", false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{})
@@ -221,7 +221,7 @@ func TestAPIVersions(t *testing.T) {
}
func TestKubeVersionWithSymbol(t *testing.T) {
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{KubeVersion: "1.30.11+IKS"})
@@ -244,7 +244,7 @@ func TestKubeVersionWithSymbol(t *testing.T) {
}
func TestSkipCrds(t *testing.T) {
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/crds", nil, false, "", "", "", false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{SkipCrds: false})
@@ -261,7 +261,7 @@ func TestSkipCrds(t *testing.T) {
}
func TestSkipTests(t *testing.T) {
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false, false)
h, err := NewHelmApp("./testdata/tests", nil, false, "", "", "", false)
require.NoError(t, err)
objs, err := template(h, &TemplateOpts{SkipTests: false})

View File

@@ -143,7 +143,6 @@ func NewClientWithLock(repoURL string, creds Creds, repoLock sync.KeyLock, proxy
Proxy: proxy.GetCallback(proxyURL, noProxy),
TLSClientConfig: tlsConf,
DisableKeepAlives: true,
ForceAttemptHTTP2: true,
},
/*
CheckRedirect: func(req *http.Request, via []*http.Request) error {

View File

@@ -5,22 +5,16 @@ import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
"errors"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"slices"
"testing"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"oras.land/oras-go/v2"
"oras.land/oras-go/v2/content"
@@ -767,38 +761,6 @@ func Test_nativeOCIClient_ResolveRevision(t *testing.T) {
}
}
func TestNewClientUsesHTTP2(t *testing.T) {
t.Run("should negotiate HTTP/2 when TLS is configured", func(t *testing.T) {
var requestProtos []string
server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestProtos = append(requestProtos, r.Proto)
t.Logf("called %s with proto %s", r.URL.Path, r.Proto)
w.WriteHeader(http.StatusOK)
}))
// httptest.NewTLSServer only advertises http/1.1 in ALPN, so we must
// configure the server to also offer h2 for HTTP/2 negotiation to work.
server.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
server.StartTLS()
t.Cleanup(server.Close)
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
// NewClient expects oci://host/path format.
repoURL := "oci://" + serverURL.Host + "/myorg/myrepo"
client, err := NewClient(repoURL, Creds{InsecureSkipVerify: true}, "", "", nil,
WithEventHandlers(fakeEventHandlers(t, serverURL.Host+"/myorg/myrepo")))
require.NoError(t, err)
// TestRepo pings the registry's /v2/ endpoint, exercising the transport.
_, _ = client.TestRepo(t.Context())
require.NotEmpty(t, requestProtos, "expected at least one request to the server")
hasHTTP2 := slices.Contains(requestProtos, "HTTP/2.0")
assert.True(t, hasHTTP2, "expected at least one HTTP/2 request, but got protocols: %v", requestProtos)
})
}
func fakeEventHandlers(t *testing.T, repoURL string) EventHandlers {
t.Helper()
return EventHandlers{
@@ -810,9 +772,6 @@ func fakeEventHandlers(t *testing.T, repoURL string) EventHandlers {
OnGetTagsFail: func(repo string) func() {
return func() { require.Equal(t, repoURL, repo) }
},
OnTestRepoFail: func(repo string) func() {
return func() { require.Equal(t, repoURL, repo) }
},
OnExtractFail: func(repo string) func(revision string) {
return func(_ string) { require.Equal(t, repoURL, repo) }
},

View File

@@ -1069,7 +1069,3 @@ func FormatAccessTokenCacheKey(sub string) string {
func formatOidcTokenCacheKey(sub string, sid string) string {
return fmt.Sprintf("%s_%s_%s", OidcTokenCachePrefix, sub, sid)
}
func (a *ClientApp) IssuerURL() string {
return a.issuerURL
}

View File

@@ -1,10 +1,11 @@
//go:build race
package settings
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"sync/atomic"
"testing"
@@ -41,7 +42,7 @@ func TestClusterInformer_ConcurrentAccess(t *testing.T) {
},
}
clientset := fake.NewClientset(secret1)
clientset := fake.NewSimpleClientset(secret1)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -49,15 +50,16 @@ func TestClusterInformer_ConcurrentAccess(t *testing.T) {
cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)
var wg sync.WaitGroup
for range 100 {
wg.Go(func() {
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
defer wg.Done()
cluster, err := informer.GetClusterByURL("https://cluster1.example.com")
// require calls t.FailNow(), which only stops the current goroutine, not the test
assert.NoError(t, err) //nolint:testifylint
assert.NoError(t, err)
assert.NotNil(t, cluster)
// Modifying returned cluster should not affect others due to DeepCopy
cluster.Name = "modified"
})
}()
}
wg.Wait()
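The concurrency changes in this test flip between the classic wg.Add/go/Done pattern and the newer sync.WaitGroup.Go helper (available in recent Go releases), along with integer range loops. A side-by-side sketch of the two equivalent forms; doWork is a hypothetical placeholder for the goroutine body:

doWork := func() { /* e.g. read from the informer */ }

var wg sync.WaitGroup

for i := 0; i < 100; i++ { // classic form: Add before launching, Done inside
	wg.Add(1)
	go func() {
		defer wg.Done()
		doWork()
	}()
}

for range 100 { // newer form: WaitGroup.Go handles Add and Done itself
	wg.Go(doWork)
}

wg.Wait()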
@@ -85,7 +87,7 @@ func TestClusterInformer_TransformErrors(t *testing.T) {
},
}
clientset := fake.NewClientset(badSecret)
clientset := fake.NewSimpleClientset(badSecret)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -94,12 +96,12 @@ func TestClusterInformer_TransformErrors(t *testing.T) {
// GetClusterByURL should return not found since transform failed
_, err = informer.GetClusterByURL("https://bad.example.com")
require.Error(t, err)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not found")
// ListClusters should return an error because the cache contains a secret and not a cluster
_, err = informer.ListClusters()
require.Error(t, err)
assert.Error(t, err)
assert.Contains(t, err.Error(), "cluster cache contains unexpected type")
}
@@ -138,7 +140,7 @@ func TestClusterInformer_TransformErrors_MixedSecrets(t *testing.T) {
},
}
clientset := fake.NewClientset(goodSecret, badSecret)
clientset := fake.NewSimpleClientset(goodSecret, badSecret)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -152,7 +154,7 @@ func TestClusterInformer_TransformErrors_MixedSecrets(t *testing.T) {
// But ListClusters should fail because there's a bad secret in the cache
_, err = informer.ListClusters()
require.Error(t, err)
assert.Error(t, err)
assert.Contains(t, err.Error(), "cluster cache contains unexpected type")
}
@@ -175,7 +177,7 @@ func TestClusterInformer_DynamicUpdates(t *testing.T) {
},
}
clientset := fake.NewClientset(secret1)
clientset := fake.NewSimpleClientset(secret1)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -233,7 +235,7 @@ func TestClusterInformer_URLNormalization(t *testing.T) {
},
}
clientset := fake.NewClientset(secret)
clientset := fake.NewSimpleClientset(secret)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -288,7 +290,7 @@ func TestClusterInformer_GetClusterServersByName(t *testing.T) {
},
}
clientset := fake.NewClientset(secrets...)
clientset := fake.NewSimpleClientset(secrets...)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -307,7 +309,7 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
defer cancel()
var secrets []*corev1.Secret
for i := range 10 {
for i := 0; i < 10; i++ {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("cluster-%d", i),
@@ -317,15 +319,15 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
},
},
Data: map[string][]byte{
"server": fmt.Appendf(nil, "https://cluster%d.example.com", i),
"name": fmt.Appendf(nil, "cluster-%d", i),
"server": []byte(fmt.Sprintf("https://cluster%d.example.com", i)),
"name": []byte(fmt.Sprintf("cluster-%d", i)),
"config": []byte(`{"bearerToken":"token"}`),
},
}
secrets = append(secrets, secret)
}
clientset := fake.NewClientset()
clientset := fake.NewSimpleClientset()
for _, secret := range secrets {
_, err := clientset.CoreV1().Secrets("argocd").Create(t.Context(), secret, metav1.CreateOptions{})
require.NoError(t, err)
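Note: the server/name Data lines in the hunk above differ only in how the bytes are built. fmt.Appendf (Go 1.19+) formats directly into a byte slice, while []byte(fmt.Sprintf(...)) allocates an intermediate string first; the resulting bytes are identical. A standalone sketch using only the standard library:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := fmt.Appendf(nil, "https://cluster%d.example.com", 3) // append to a nil slice, get []byte back
	b := []byte(fmt.Sprintf("https://cluster%d.example.com", 3))
	fmt.Println(bytes.Equal(a, b)) // true
}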
@@ -340,11 +342,11 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
var wg sync.WaitGroup
var readErrors, updateErrors atomic.Int64
for i := range 50 {
for i := 0; i < 50; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := range 100 {
for j := 0; j < 100; j++ {
clusterID := j % 10
url := fmt.Sprintf("https://cluster%d.example.com", clusterID)
@@ -374,13 +376,13 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
}(i)
}
for i := range 10 {
for i := 0; i < 10; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := range 20 {
for j := 0; j < 20; j++ {
secret := secrets[id%10].DeepCopy()
secret.Data["name"] = fmt.Appendf(nil, "updated-%d-%d", id, j)
secret.Data["name"] = []byte(fmt.Sprintf("updated-%d-%d", id, j))
_, err := clientset.CoreV1().Secrets("argocd").Update(t.Context(), secret, metav1.UpdateOptions{})
if err != nil {
@@ -391,9 +393,11 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
}(i)
}
for range 20 {
wg.Go(func() {
for range 50 {
for i := 0; i < 20; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < 50; j++ {
clusters, err := informer.ListClusters()
if err != nil {
readErrors.Add(1)
@@ -408,7 +412,7 @@ func TestClusterInformer_RaceCondition(t *testing.T) {
}
time.Sleep(5 * time.Millisecond)
}
})
}()
}
wg.Wait()
@@ -440,7 +444,7 @@ func TestClusterInformer_DeepCopyIsolation(t *testing.T) {
},
}
clientset := fake.NewClientset(secret)
clientset := fake.NewSimpleClientset(secret)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -479,13 +483,12 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
name: "Empty namespace - no clusters",
secrets: []runtime.Object{},
testFunc: func(t *testing.T, informer *ClusterInformer) {
t.Helper()
clusters, err := informer.ListClusters()
require.NoError(t, err)
assert.Empty(t, clusters)
_, err = informer.GetClusterByURL("https://nonexistent.example.com")
require.Error(t, err)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not found")
},
},
@@ -508,10 +511,9 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
},
},
testFunc: func(t *testing.T, informer *ClusterInformer) {
t.Helper()
cluster, err := informer.GetClusterByURL("https://noname.example.com")
require.NoError(t, err)
assert.Empty(t, cluster.Name)
assert.Equal(t, "", cluster.Name)
servers, err := informer.GetClusterServersByName("")
require.NoError(t, err)
@@ -537,7 +539,6 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
},
},
testFunc: func(t *testing.T, informer *ClusterInformer) {
t.Helper()
cluster, err := informer.GetClusterByURL("https://cluster.example.com:8443/path/")
require.NoError(t, err)
assert.Equal(t, "special", cluster.Name)
@@ -577,7 +578,6 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
},
},
testFunc: func(t *testing.T, informer *ClusterInformer) {
t.Helper()
cluster, err := informer.GetClusterByURL("https://duplicate.example.com")
require.NoError(t, err)
assert.NotNil(t, cluster)
@@ -598,21 +598,20 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
"server": []byte("https://many-ns.example.com"),
"name": []byte("many-ns"),
"namespaces": func() []byte {
var sb strings.Builder
for i := range 100 {
ns := ""
for i := 0; i < 100; i++ {
if i > 0 {
sb.WriteString(",")
ns += ","
}
fmt.Fprintf(&sb, "namespace-%d", i)
ns += fmt.Sprintf("namespace-%d", i)
}
return []byte(sb.String())
return []byte(ns)
}(),
"config": []byte(`{}`),
},
},
},
testFunc: func(t *testing.T, informer *ClusterInformer) {
t.Helper()
cluster, err := informer.GetClusterByURL("https://many-ns.example.com")
require.NoError(t, err)
assert.Len(t, cluster.Namespaces, 100)
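Note: both versions of the namespaces closure in the hunk above produce the same comma-separated list; strings.Builder just avoids reallocating the string on every += inside the loop. A standalone sketch of the Builder form:

package main

import (
	"fmt"
	"strings"
)

func main() {
	var sb strings.Builder
	for i := 0; i < 5; i++ {
		if i > 0 {
			sb.WriteString(",")
		}
		fmt.Fprintf(&sb, "namespace-%d", i)
	}
	fmt.Println(sb.String()) // namespace-0,namespace-1,namespace-2,namespace-3,namespace-4
}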
@@ -649,7 +648,6 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
},
},
testFunc: func(t *testing.T, informer *ClusterInformer) {
t.Helper()
cluster, err := informer.GetClusterByURL("https://annotated.example.com")
require.NoError(t, err)
@@ -678,7 +676,7 @@ func TestClusterInformer_EdgeCases(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
clientset := fake.NewClientset(tt.secrets...)
clientset := fake.NewSimpleClientset(tt.secrets...)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -722,7 +720,7 @@ func TestClusterInformer_SecretDeletion(t *testing.T) {
},
}
clientset := fake.NewClientset(secret1, secret2)
clientset := fake.NewSimpleClientset(secret1, secret2)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -803,7 +801,7 @@ func TestClusterInformer_ComplexConfig(t *testing.T) {
},
}
clientset := fake.NewClientset(secret)
clientset := fake.NewSimpleClientset(secret)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(t, err)
@@ -816,8 +814,8 @@ func TestClusterInformer_ComplexConfig(t *testing.T) {
assert.Equal(t, "admin", cluster.Config.Username)
assert.Equal(t, "password123", cluster.Config.Password)
assert.Equal(t, "bearer-token", cluster.Config.BearerToken)
assert.True(t, cluster.Config.Insecure)
assert.Equal(t, "cluster.internal", cluster.Config.ServerName)
assert.True(t, cluster.Config.TLSClientConfig.Insecure)
assert.Equal(t, "cluster.internal", cluster.Config.TLSClientConfig.ServerName)
assert.NotNil(t, cluster.Config.AWSAuthConfig)
assert.Equal(t, "eks-cluster", cluster.Config.AWSAuthConfig.ClusterName)
@@ -836,7 +834,7 @@ func BenchmarkClusterInformer_GetClusterByURL(b *testing.B) {
defer cancel()
var secrets []runtime.Object
for i := range 1000 {
for i := 0; i < 1000; i++ {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("cluster-%d", i),
@@ -846,15 +844,15 @@ func BenchmarkClusterInformer_GetClusterByURL(b *testing.B) {
},
},
Data: map[string][]byte{
"server": fmt.Appendf(nil, "https://cluster%d.example.com", i),
"name": fmt.Appendf(nil, "cluster-%d", i),
"server": []byte(fmt.Sprintf("https://cluster%d.example.com", i)),
"name": []byte(fmt.Sprintf("cluster-%d", i)),
"config": []byte(`{"bearerToken":"token"}`),
},
}
secrets = append(secrets, secret)
}
clientset := fake.NewClientset(secrets...)
clientset := fake.NewSimpleClientset(secrets...)
informer, err := NewClusterInformer(clientset, "argocd")
require.NoError(b, err)

View File

@@ -1,50 +0,0 @@
package settings
import (
"fmt"
"strings"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/glob"
)
const (
// serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
// in a DefaultServiceAccount configured for a DestinationServiceAccount
serviceAccountDisallowedCharSet = "!*[]{}\\/"
)
// DeriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
func DeriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application, destCluster *v1alpha1.Cluster) (string, error) {
// spec.Destination.Namespace is optional. If not specified, use the Application's
// namespace
serviceAccountNamespace := application.Spec.Destination.Namespace
if serviceAccountNamespace == "" {
serviceAccountNamespace = application.Namespace
}
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
// if so, return the service account specified for that destination.
for _, item := range project.Spec.DestinationServiceAccounts {
dstServerMatched, err := glob.MatchWithError(item.Server, destCluster.Server)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
}
dstNamespaceMatched, err := glob.MatchWithError(item.Namespace, application.Spec.Destination.Namespace)
if err != nil {
return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
}
if dstServerMatched && dstNamespaceMatched {
if strings.Trim(item.DefaultServiceAccount, " ") == "" || strings.ContainsAny(item.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
return "", fmt.Errorf("default service account contains invalid chars '%s'", item.DefaultServiceAccount)
} else if strings.Contains(item.DefaultServiceAccount, ":") {
// service account is specified along with its namespace.
return "system:serviceaccount:" + item.DefaultServiceAccount, nil
}
// service account needs to be prefixed with a namespace
return fmt.Sprintf("system:serviceaccount:%s:%s", serviceAccountNamespace, item.DefaultServiceAccount), nil
}
}
// if there is no match found in the AppProject.Spec.DestinationServiceAccounts, use the default service account of the destination namespace.
return "", fmt.Errorf("no matching service account found for destination server %s and namespace %s", application.Spec.Destination.Server, serviceAccountNamespace)
}

View File

@@ -1,268 +0,0 @@
package settings
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
)
func TestDeriveServiceAccountToImpersonate(t *testing.T) {
t.Run("MatchingServerAndNamespace", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "https://cluster-api.example.com", Namespace: "dest-ns", DefaultServiceAccount: "test-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
require.NoError(t, err)
assert.Equal(t, "system:serviceaccount:dest-ns:test-sa", user)
})
t.Run("MatchingWithGlobPatterns", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "*", Namespace: "*", DefaultServiceAccount: "test-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "any-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
require.NoError(t, err)
assert.Equal(t, "system:serviceaccount:any-ns:test-sa", user)
})
t.Run("MatchingWithNamespacedServiceAccount", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "https://cluster-api.example.com", Namespace: "dest-ns", DefaultServiceAccount: "other-ns:deploy-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
require.NoError(t, err)
assert.Equal(t, "system:serviceaccount:other-ns:deploy-sa", user)
})
t.Run("FallbackToAppNamespaceWhenDestEmpty", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
// Namespace pattern matches empty string via glob "*"
{Server: "*", Namespace: "", DefaultServiceAccount: "test-sa"},
},
},
}
app := &v1alpha1.Application{
ObjectMeta: metav1.ObjectMeta{Namespace: "app-ns"},
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
require.NoError(t, err)
// Should use app.Namespace ("app-ns") as the SA namespace since Destination.Namespace is empty
assert.Equal(t, "system:serviceaccount:app-ns:test-sa", user)
})
t.Run("NoMatchingEntry", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "https://other-server.com", Namespace: "other-ns", DefaultServiceAccount: "test-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
assert.Empty(t, user)
assert.ErrorContains(t, err, "no matching service account found")
})
t.Run("EmptyDestinationServiceAccounts", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
assert.Empty(t, user)
assert.ErrorContains(t, err, "no matching service account found")
})
t.Run("InvalidServiceAccountChars", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "*", Namespace: "*", DefaultServiceAccount: "bad*sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
assert.Empty(t, user)
assert.ErrorContains(t, err, "default service account contains invalid chars")
})
t.Run("BlankServiceAccount", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "*", Namespace: "*", DefaultServiceAccount: " "},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
assert.Empty(t, user)
assert.ErrorContains(t, err, "default service account contains invalid chars")
})
t.Run("InvalidServerGlobPattern", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "[", Namespace: "dest-ns", DefaultServiceAccount: "test-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
assert.Empty(t, user)
assert.ErrorContains(t, err, "invalid glob pattern for destination server")
})
t.Run("InvalidNamespaceGlobPattern", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "*", Namespace: "[", DefaultServiceAccount: "test-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
assert.Empty(t, user)
assert.ErrorContains(t, err, "invalid glob pattern for destination namespace")
})
t.Run("FirstMatchWins", func(t *testing.T) {
project := &v1alpha1.AppProject{
Spec: v1alpha1.AppProjectSpec{
DestinationServiceAccounts: []v1alpha1.ApplicationDestinationServiceAccount{
{Server: "*", Namespace: "dest-ns", DefaultServiceAccount: "first-sa"},
{Server: "*", Namespace: "*", DefaultServiceAccount: "second-sa"},
},
},
}
app := &v1alpha1.Application{
Spec: v1alpha1.ApplicationSpec{
Destination: v1alpha1.ApplicationDestination{
Server: "https://cluster-api.example.com",
Namespace: "dest-ns",
},
},
}
cluster := &v1alpha1.Cluster{Server: "https://cluster-api.example.com"}
user, err := DeriveServiceAccountToImpersonate(project, app, cluster)
require.NoError(t, err)
assert.Equal(t, "system:serviceaccount:dest-ns:first-sa", user)
})
}

View File

@@ -119,6 +119,8 @@ type ArgoCDSettings struct {
PasswordPattern string `json:"passwordPattern,omitempty"`
// BinaryUrls contains the URLs for downloading argocd binaries
BinaryUrls map[string]string `json:"binaryUrls,omitempty"`
// InClusterEnabled indicates whether to allow in-cluster server address
InClusterEnabled bool `json:"inClusterEnabled"`
// ServerRBACLogEnforceEnable temporary var indicates whether rbac will be enforced on logs
ServerRBACLogEnforceEnable bool `json:"serverRBACLogEnforceEnable"`
// MaxPodLogsToRender the maximum number of pod logs to render
@@ -559,10 +561,6 @@ const (
// application sync with impersonation feature is disabled by default.
defaultImpersonationEnabledFlag = false
// defaultInClusterEnabledFlag is the default value when the in-cluster setting
// cannot be read from the configmap or is not explicitly set by the user.
defaultInClusterEnabledFlag = true
)
var sourceTypeToEnableGenerationKey = map[v1alpha1.ApplicationSourceType]string{
@@ -663,11 +661,10 @@ func (mgr *SettingsManager) GetSecretsInformer() (cache.SharedIndexInformer, err
}
// GetClusterInformer returns the cluster cache for optimized cluster lookups.
func (mgr *SettingsManager) GetClusterInformer() (*ClusterInformer, error) {
if err := mgr.ensureSynced(false); err != nil {
return nil, fmt.Errorf("error ensuring that the settings manager is synced: %w", err)
}
return mgr.clusterInformer, nil
func (mgr *SettingsManager) GetClusterInformer() *ClusterInformer {
// Ensure the settings manager is initialized
_ = mgr.ensureSynced(false)
return mgr.clusterInformer
}
func (mgr *SettingsManager) updateSecret(callback func(*corev1.Secret) error) error {
@@ -1338,10 +1335,10 @@ func (mgr *SettingsManager) GetSettings() (*ArgoCDSettings, error) {
if err := mgr.updateSettingsFromSecret(&settings, argoCDSecret, secrets); err != nil {
errs = append(errs, err)
}
updateSettingsFromConfigMap(&settings, argoCDCM)
if len(errs) > 0 {
return &settings, errors.Join(errs...)
}
updateSettingsFromConfigMap(&settings, argoCDCM)
return &settings, nil
}
@@ -1530,6 +1527,7 @@ func updateSettingsFromConfigMap(settings *ArgoCDSettings, argoCDCM *corev1.Conf
settings.MaxPodLogsToRender = val
}
}
settings.InClusterEnabled = argoCDCM.Data[inClusterEnabledKey] != "false"
settings.ExecEnabled = argoCDCM.Data[execEnabledKey] == "true"
execShells := argoCDCM.Data[execShellsKey]
if execShells != "" {
@@ -2429,15 +2427,3 @@ func (mgr *SettingsManager) GetAllowedNodeLabels() []string {
}
return labelKeys
}
// IsInClusterEnabled returns false if in-cluster is explicitly disabled in argocd-cm configmap, true otherwise
func (mgr *SettingsManager) IsInClusterEnabled() (bool, error) {
argoCDCM, err := mgr.getConfigMap()
if err != nil {
return defaultInClusterEnabledFlag, fmt.Errorf("error checking %s property in configmap: %w", inClusterEnabledKey, err)
}
if inClusterEnabled, ok := argoCDCM.Data[inClusterEnabledKey]; ok {
return inClusterEnabled != "false", nil
}
return defaultInClusterEnabledFlag, nil
}
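Note: the removed IsInClusterEnabled method and the new settings.InClusterEnabled field (set in updateSettingsFromConfigMap above) encode the same rule: in-cluster access stays enabled unless the argocd-cm value is the literal string "false". A standalone sketch of that check, using the cluster.inClusterEnabled key that the tests below reference:

package main

import "fmt"

func main() {
	for _, data := range []map[string]string{
		{},                                    // key absent: enabled by default
		{"cluster.inClusterEnabled": "true"},  // explicitly enabled
		{"cluster.inClusterEnabled": "false"}, // the only value that disables it
	} {
		enabled := data["cluster.inClusterEnabled"] != "false"
		fmt.Println(enabled) // prints true, true, false
	}
}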

View File

@@ -178,13 +178,6 @@ func TestInClusterServerAddressEnabled(t *testing.T) {
}
func TestInClusterServerAddressEnabledByDefault(t *testing.T) {
_, settingsManager := fixtures(t.Context(), map[string]string{})
enabled, err := settingsManager.IsInClusterEnabled()
require.NoError(t, err)
require.True(t, enabled)
}
func TestGetSettings_InClusterIsEnabledWithMissingServerSecretKey(t *testing.T) {
kubeClient := fake.NewClientset(
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -205,15 +198,15 @@ func TestGetSettings_InClusterIsEnabledWithMissingServerSecretKey(t *testing.T)
},
},
Data: map[string][]byte{
"admin.password": nil,
"admin.password": nil,
"server.secretkey": nil,
},
},
)
settingsManager := NewSettingsManager(t.Context(), kubeClient, "default")
// IsInClusterEnabled reads ConfigMap directly and does not depend on server.secretkey
enabled, err := settingsManager.IsInClusterEnabled()
settings, err := settingsManager.GetSettings()
require.NoError(t, err)
require.True(t, enabled)
assert.True(t, settings.InClusterEnabled)
}
func TestGetAppInstanceLabelKey(t *testing.T) {
@@ -2171,49 +2164,6 @@ func TestIsImpersonationEnabled(t *testing.T) {
"when user enables the flag in argocd-cm config map, IsImpersonationEnabled() must not return any error")
}
func TestIsInClusterEnabled(t *testing.T) {
// When there is no argocd-cm itself,
// Then IsInClusterEnabled() must return true (default value) and an error with appropriate error message.
kubeClient := fake.NewClientset()
settingsManager := NewSettingsManager(t.Context(), kubeClient, "default")
enabled, err := settingsManager.IsInClusterEnabled()
require.True(t, enabled,
"with no argocd-cm config map, IsInClusterEnabled() must return true (default value)")
require.ErrorContains(t, err, "configmap \"argocd-cm\" not found",
"with no argocd-cm config map, IsInClusterEnabled() must return an error")
// When there is no in-cluster flag present in the argocd-cm,
// Then IsInClusterEnabled() must return true (default value) and nil error.
_, settingsManager = fixtures(t.Context(), map[string]string{})
enabled, err = settingsManager.IsInClusterEnabled()
require.True(t, enabled,
"with empty argocd-cm config map, IsInClusterEnabled() must return true (default value)")
require.NoError(t, err,
"with empty argocd-cm config map, IsInClusterEnabled() must not return any error")
// When user disables in-cluster explicitly,
// Then IsInClusterEnabled() must return false and nil error.
_, settingsManager = fixtures(t.Context(), map[string]string{
"cluster.inClusterEnabled": "false",
})
enabled, err = settingsManager.IsInClusterEnabled()
require.False(t, enabled,
"when user sets the flag to false in argocd-cm config map, IsInClusterEnabled() must return false")
require.NoError(t, err,
"when user sets the flag to false in argocd-cm config map, IsInClusterEnabled() must not return any error")
// When user enables in-cluster explicitly,
// Then IsInClusterEnabled() must return true and nil error.
_, settingsManager = fixtures(t.Context(), map[string]string{
"cluster.inClusterEnabled": "true",
})
enabled, err = settingsManager.IsInClusterEnabled()
require.True(t, enabled,
"when user sets the flag to true in argocd-cm config map, IsInClusterEnabled() must return true")
require.NoError(t, err,
"when user sets the flag to true in argocd-cm config map, IsInClusterEnabled() must not return any error")
}
func TestRequireOverridePrivilegeForRevisionSyncNoConfigMap(t *testing.T) {
// When there is no argocd-cm itself,
// Then RequireOverridePrivilegeForRevisionSync() must return false (default value) and an error with appropriate error message.